From 3c76e083557b7bf50922160ecda589554f30824a Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 20 Jul 2021 09:56:56 +0200 Subject: [PATCH 001/106] Initial proposal for a QsPasses structure --- src/QsPasses/CMakeLists.txt | 28 ++++++++++++++++++++++++++++ src/QsPasses/Makefile | 3 +++ src/QsPasses/README.md | 19 +++++++++++++++++++ src/QsPasses/docs/index.md | 3 +++ 4 files changed, 53 insertions(+) create mode 100644 src/QsPasses/CMakeLists.txt create mode 100644 src/QsPasses/Makefile create mode 100644 src/QsPasses/README.md create mode 100644 src/QsPasses/docs/index.md diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt new file mode 100644 index 0000000000..98d45c7ffa --- /dev/null +++ b/src/QsPasses/CMakeLists.txt @@ -0,0 +1,28 @@ +cmake_minimum_required(VERSION 3.4.3) + +project(QSharpPasses) + +find_package(LLVM REQUIRED CONFIG) + +message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") +message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wconversion -Wpedantic") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") + +include_directories(${LLVM_INCLUDE_DIRS}) +add_definitions(${LLVM_DEFINITIONS}) +include_directories(${CMAKE_SOURCE_DIR}/src) + +# LLVM uses RTTI by default - added here for consistency +if(NOT LLVM_ENABLE_RTTI) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") +endif() + +# The main libary +add_library(QSharpPasses SHARED src/GateCounter/GateCounter.cpp) +target_link_libraries(QSharpPasses + "$<$:-undefined dynamic_lookup>") diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile new file mode 100644 index 0000000000..270dacab28 --- /dev/null +++ b/src/QsPasses/Makefile @@ -0,0 +1,3 @@ +clean: + rm -rf Release/ + rm -rf Debug/ \ No newline at end of file diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md new file mode 100644 index 
0000000000..ce58881145 --- /dev/null +++ b/src/QsPasses/README.md @@ -0,0 +1,19 @@ +# Q# Passes for LLVM + +This subcomponent defines LLVM passes used for optimising and transforming the IR. + +## Getting started + +The Q# pass component is a dynamic library that can be compiled and ran separately from the +rest of the project code. + +## Dependencies + +This subcomponent is written in C++ and depends on: + +- LLVM +- + +## Building the passes + +To build the diff --git a/src/QsPasses/docs/index.md b/src/QsPasses/docs/index.md new file mode 100644 index 0000000000..08c1bfc9b7 --- /dev/null +++ b/src/QsPasses/docs/index.md @@ -0,0 +1,3 @@ +# Q# pass documentation + +This directory and file is a placeholder for describing LLVM passes which was already implemented. From 9eb5c027e5af82681ee7f4c8fbda35dcfeb73331 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 20 Jul 2021 09:57:27 +0200 Subject: [PATCH 002/106] Updating CMake --- src/QsPasses/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 98d45c7ffa..eb5eb53301 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -7,12 +7,14 @@ find_package(LLVM REQUIRED CONFIG) message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") +# Setting the standard for set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wconversion -Wpedantic") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") + include_directories(${LLVM_INCLUDE_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/src) From 6674e248dd6e963045c290826a9d6bd922219305 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 20 Jul 2021 12:43:18 +0200 Subject: [PATCH 003/106] Adding CI stuff --- src/QsPasses/.clang-format | 57 +++++++ src/QsPasses/.clang-tidy | 22 +++ src/QsPasses/Makefile | 14 +- src/QsPasses/README.md | 62 ++++++- src/QsPasses/scripts/Builder/__init__.py | 0 src/QsPasses/scripts/FormatSource/__init__.py | 152 ++++++++++++++++++ src/QsPasses/scripts/ToolChain/__init__.py | 0 src/QsPasses/src/GateCounter/GateCounter.cpp | 64 ++++++++ src/QsPasses/src/Llvm.hpp | 37 +++++ 9 files changed, 401 insertions(+), 7 deletions(-) create mode 100644 src/QsPasses/.clang-format create mode 100644 src/QsPasses/.clang-tidy create mode 100644 src/QsPasses/scripts/Builder/__init__.py create mode 100644 src/QsPasses/scripts/FormatSource/__init__.py create mode 100644 src/QsPasses/scripts/ToolChain/__init__.py create mode 100644 src/QsPasses/src/GateCounter/GateCounter.cpp create mode 100644 src/QsPasses/src/Llvm.hpp diff --git a/src/QsPasses/.clang-format b/src/QsPasses/.clang-format new file mode 100644 index 0000000000..f44b5289b3 --- /dev/null +++ b/src/QsPasses/.clang-format @@ -0,0 +1,57 @@ +--- +BasedOnStyle: Google +Language: Cpp +AccessModifierOffset: -2 +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: true +AlignTrailingComments: true +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +BraceWrapping: + AfterClass: true + AfterControlStatement: true + AfterEnum: true + AfterFunction: true + AfterNamespace: false + AfterStruct: true + AfterUnion: true + AfterExternBlock: true + BeforeCatch: true + BeforeElse: true + SplitEmptyFunction: false +BreakBeforeBraces: Custom +BreakConstructorInitializers: BeforeComma +ColumnLimit: 100 +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 2 +ContinuationIndentWidth: 4 +DerivePointerAlignment: false +IncludeBlocks: Regroup +IncludeCategories: + - Regex: 
'.*\.\..*' + Priority: 1 + - Regex: '^<.*\.h.*>$' + Priority: 5 + - Regex: '^<.*>$' + Priority: 6 + - Regex: '^"(gtest)|(gmock)|(benchmark)/.*"$' + Priority: 4 + - Regex: '.*/.*' + Priority: 3 + - Regex: '.*' + Priority: 2 +IncludeIsMainRegex: '' +IndentCaseLabels: false +IndentWidth: 2 +KeepEmptyLinesAtTheStartOfBlocks: true +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +PointerAlignment: Right +SortIncludes: true +SortUsingDeclarations: true +SpaceInEmptyParentheses: false +SpacesInAngles: false +Standard: Cpp11 +UseTab: Never diff --git a/src/QsPasses/.clang-tidy b/src/QsPasses/.clang-tidy new file mode 100644 index 0000000000..f86dcb3ccf --- /dev/null +++ b/src/QsPasses/.clang-tidy @@ -0,0 +1,22 @@ +Checks: "-*,\ +bugprone-*,\ +cert-dcl*,\ +cert-env*,\ +cert-err52-cpp,\ +cert-err60-cpp,\ +cert-flp30-c,\ +clang-analyzer-security.FloatLoopCounter,\ +google-build-explicit-make-pair,\ +google-build-namespaces,\ +google-explicit-constructor,\ +google-readability-*,\ +google-runtime-operator,\ +hicpp-exception-baseclass,\ +hicpp-explicit-conversions,\ +hicpp-use-*,\ +misc-*,\ +-misc-misplaced-widening-cast,\ +modernize-*,\ +performance-*,\ +readability-*,\ +-readability-identifier-naming" \ No newline at end of file diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile index 270dacab28..b5709e0fa4 100644 --- a/src/QsPasses/Makefile +++ b/src/QsPasses/Makefile @@ -1,3 +1,13 @@ +stylecheck: + @python scripts/FormatSource/__init__.py + +lint: + @echo "Static analysis not added yet" + +tests: + mkdir Debug + cd Debug && cmake .. && make -j4 && ctest + clean: - rm -rf Release/ - rm -rf Debug/ \ No newline at end of file + rm -rf Release/ + rm -rf Debug/ diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index ce58881145..85a3d5e1a3 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -1,19 +1,71 @@ # Q# Passes for LLVM -This subcomponent defines LLVM passes used for optimising and transforming the IR. 
+This library defines LLVM passes used for optimising and transforming the IR. ## Getting started -The Q# pass component is a dynamic library that can be compiled and ran separately from the +The Q# pass library is a dynamic library that can be compiled and ran separately from the rest of the project code. ## Dependencies -This subcomponent is written in C++ and depends on: +This library is written in C++ and depends on: - LLVM -- + +Additional development dependencies include: + +- CMake +- clang-format +- clang-tidy ## Building the passes -To build the +To build the passes, create a new build directory and switch to that directory: + +```sh +mkdir Debug +cd Debug/ +``` + +To build the library, first configure CMake from the build directory + +```sh +cmake .. +``` + +and then make your target + +```sh +make [target] +``` + +## Running a pass + +Yet to be written + +## CI + +Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, +unit test passes and that there are no erros found by the static analyser. + +To check the style, run + +```sh +make stylecheck +``` + +To test that the code compiles and tests passes run + +```sh +make tests +``` + +Finally, to analyse the code, run + +```sh +make lint +``` + +As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended +that you use a docker image to perform these steps. diff --git a/src/QsPasses/scripts/Builder/__init__.py b/src/QsPasses/scripts/Builder/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/QsPasses/scripts/FormatSource/__init__.py b/src/QsPasses/scripts/FormatSource/__init__.py new file mode 100644 index 0000000000..f8f6e3929e --- /dev/null +++ b/src/QsPasses/scripts/FormatSource/__init__.py @@ -0,0 +1,152 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +from os import path +import os +import logging +import subprocess + + +def discover_formatter(): + # TODO(TFR): Auto discover, use full path + return "clang-format" + + +logger = logging.getLogger() +PROJECT_ROOT = path.abspath(path.dirname(path.dirname(path.dirname(__file__)))) +CLANG_FORMAT_EXE = discover_formatter() + +####### +# Style pipeline components + + +def require_token(token, filename, contents, cursor, dry_run): + failed = False + if not contents[cursor:].startswith(token): + logger.error("{}: File must have {} at position {}".format(filename, token, cursor)) + failed = True + return cursor + len(token), failed + + +def require_pragma_once(filename, contents, cursor, dry_run): + return require_token("#pragma once\n", filename, contents, cursor, dry_run) + + +def require_todo_owner(filename, contents, cursor, dry_run): + # TODO(tfr): implement + return cursor, False + + +def enforce_cpp_license(filename, contents, cursor, dry_run): + return require_token("""// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +""", filename, contents, cursor, dry_run) + + +def enforce_py_license(filename, contents, cursor, dry_run): + # Allowing empty files + if contents.strip() == "": + return cursor, False + + return require_token("""# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +""", filename, contents, cursor, dry_run) + + +def enforce_formatting(filename, contents, cursor, dry_run): + p = subprocess.Popen( + [CLANG_FORMAT_EXE, '-style=file'], + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + cwd=PROJECT_ROOT) + output = p.communicate(input=contents.encode())[0] + + if p.returncode != 0: + raise Exception('Could not format contents') + + formatted = output.decode('utf-8') + if formatted != contents: + logger.error("{} was not correctly formatted.".format(filename)) + + return cursor, False + + +####### +# Source pipeline definitions + + +AUTO_FORMAT_LANGUAGES = [ + { + "name": "C++ Main", + "src": path.join(PROJECT_ROOT, "src"), + + "pipelines": { + "hpp": [ + require_pragma_once, + enforce_cpp_license, + enforce_formatting + ], + "cpp": [ + enforce_cpp_license, + enforce_formatting + ] + } + }, + { + "name": "Scripts", + "src": path.join(PROJECT_ROOT, "scripts"), + + "pipelines": { + "py": [ + enforce_py_license, + ], + } + } +] + + +def execute_pipeline(pipeline, filename: str, dry_run: bool): + logger.info("Executing pipeline for {}".format(filename)) + cursor = 0 + + with open(filename, "r") as fb: + contents = fb.read() + + failed = False + for fnc in pipeline: + cursor, f = fnc(filename, contents, cursor, dry_run) + failed = failed or f + + return failed + + +def main(dry_run: bool = True): + failed = False + + for language in AUTO_FORMAT_LANGUAGES: + logger.info("Formatting {}".format(language["name"])) + basedir = language["src"] + pipelines = language["pipelines"] + + for root, dirs, files in os.walk(basedir): + + for filename in files: + if "." 
not in filename: + continue + + _, ext = filename.rsplit(".", 1) + if ext in pipelines: + f = execute_pipeline(pipelines[ext], path.join(root, filename), dry_run) + failed = failed or f + + if failed: + logger.error("Your code did not pass formatting.") + + return failed + + +if __name__ == "__main__": + if main(): + exit(-1) diff --git a/src/QsPasses/scripts/ToolChain/__init__.py b/src/QsPasses/scripts/ToolChain/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/QsPasses/src/GateCounter/GateCounter.cpp b/src/QsPasses/src/GateCounter/GateCounter.cpp new file mode 100644 index 0000000000..540a85e626 --- /dev/null +++ b/src/QsPasses/src/GateCounter/GateCounter.cpp @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include "Llvm.hpp" + +using namespace llvm; + +namespace { + +void visitor(Function &F) +{ + errs() << "(gate-counter) " << F.getName() << "\n"; + errs() << "(gate-counter) number of arguments: " << F.arg_size() << "\n"; +} + +struct GateCounterPass : PassInfoMixin +{ + PreservedAnalyses run(Function &F, FunctionAnalysisManager &) + { + visitor(F); + + return PreservedAnalyses::all(); + } +}; + +struct LegacyGateCounterPass : public FunctionPass +{ + static char ID; + LegacyGateCounterPass() + : FunctionPass(ID) + {} + + bool runOnFunction(Function &F) override + { + visitor(F); + return false; + } +}; +} // namespace + +llvm::PassPluginLibraryInfo getGateCounterPluginInfo() +{ + return {LLVM_PLUGIN_API_VERSION, "GateCounter", LLVM_VERSION_STRING, [](PassBuilder &PB) { + PB.registerPipelineParsingCallback([](StringRef Name, FunctionPassManager &FPM, + ArrayRef) { + if (Name == "gate-counter") + { + FPM.addPass(GateCounterPass()); + return true; + } + return false; + }); + }}; +} + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return getGateCounterPluginInfo(); +} + +char LegacyGateCounterPass::ID = 0; +static 
RegisterPass LegacyGateCounterRegistration("legacy-gate-counter", + "Gate Counter Pass", true, + false); diff --git a/src/QsPasses/src/Llvm.hpp b/src/QsPasses/src/Llvm.hpp new file mode 100644 index 0000000000..408ad61cdf --- /dev/null +++ b/src/QsPasses/src/Llvm.hpp @@ -0,0 +1,37 @@ +#pragma once +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#if defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wpedantic" +#pragma GCC diagnostic ignored "-Wunused-value" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wunknown-warning-option" +#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wconversion" +#pragma clang diagnostic ignored "-Wpedantic" +#pragma clang diagnostic ignored "-Werror" +#pragma clang diagnostic ignored "-Wshadow" +#pragma clang diagnostic ignored "-Wreturn-std-move" +#pragma clang diagnostic ignored "-Wunknown-warning-option" +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif + +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Passes/PassPlugin.h" +#include "llvm/Support/raw_ostream.h" + +#if defined(__clang__) +#pragma clang diagnostic pop +#endif + +#if defined(__GNUC__) +#pragma GCC diagnostic pop +#endif \ No newline at end of file From 199eed46f09e14fb014b9596cfa9ede41fea1c32 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 20 Jul 2021 14:34:20 +0200 Subject: [PATCH 004/106] Making CLI interface for CI tasks --- src/QsPasses/Makefile | 7 +- src/QsPasses/README.md | 11 +++ src/QsPasses/develop.env | 3 + src/QsPasses/requirements.txt | 1 + src/QsPasses/scripts/Builder/__init__.py | 0 src/QsPasses/scripts/ToolChain/__init__.py | 0 .../site-packages/PassCI/Builder/__init__.py | 73 +++++++++++++++++++ .../PassCI}/FormatSource/__init__.py | 10 +-- src/QsPasses/site-packages/PassCI/Project.py | 13 ++++ .../PassCI/ToolChain/__init__.py | 8 ++ src/QsPasses/site-packages/PassCI/__main__.py | 68 +++++++++++++++++ 11 files changed, 184 insertions(+), 10 deletions(-) create mode 100644 src/QsPasses/develop.env create mode 100644 src/QsPasses/requirements.txt delete mode 100644 src/QsPasses/scripts/Builder/__init__.py delete mode 100644 src/QsPasses/scripts/ToolChain/__init__.py create mode 100644 src/QsPasses/site-packages/PassCI/Builder/__init__.py rename src/QsPasses/{scripts => site-packages/PassCI}/FormatSource/__init__.py (93%) create mode 100644 src/QsPasses/site-packages/PassCI/Project.py create mode 100644 src/QsPasses/site-packages/PassCI/ToolChain/__init__.py create mode 100644 src/QsPasses/site-packages/PassCI/__main__.py diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile index b5709e0fa4..324c136291 100644 --- a/src/QsPasses/Makefile +++ b/src/QsPasses/Makefile @@ -1,12 +1,13 @@ stylecheck: - @python scripts/FormatSource/__init__.py + @source develop.env && python -m PassCI --stylecheck + + lint: @echo "Static analysis not added yet" tests: - mkdir Debug - cd Debug && cmake .. 
&& make -j4 && ctest + @source develop.env && python -m PassCI test clean: rm -rf Release/ diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 85a3d5e1a3..65f9ef8206 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -49,6 +49,17 @@ Yet to be written Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, unit test passes and that there are no erros found by the static analyser. +To setup the CI environment, run following commands + +```sh +source develop.env +virtualenv develop__venv +source develop__venv/bin/activate +pip install -r requirements.txt +``` + +These adds the necessary environment variables to ensure that you have the `PassCI` package and all required dependencies. + To check the style, run ```sh diff --git a/src/QsPasses/develop.env b/src/QsPasses/develop.env new file mode 100644 index 0000000000..002d66b4ee --- /dev/null +++ b/src/QsPasses/develop.env @@ -0,0 +1,3 @@ +#!/bin/sh + +export PYTHONPATH=$PYTHONPATH:$PWD/site-packages \ No newline at end of file diff --git a/src/QsPasses/requirements.txt b/src/QsPasses/requirements.txt new file mode 100644 index 0000000000..77c1d85ae8 --- /dev/null +++ b/src/QsPasses/requirements.txt @@ -0,0 +1 @@ +click==8.0.1 diff --git a/src/QsPasses/scripts/Builder/__init__.py b/src/QsPasses/scripts/Builder/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/QsPasses/scripts/ToolChain/__init__.py b/src/QsPasses/scripts/ToolChain/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/QsPasses/site-packages/PassCI/Builder/__init__.py b/src/QsPasses/site-packages/PassCI/Builder/__init__.py new file mode 100644 index 0000000000..9b77c7035d --- /dev/null +++ b/src/QsPasses/site-packages/PassCI/Builder/__init__.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +from .. 
import Project +from ..Project import PROJECT_ROOT +import logging +import subprocess +import sys + +logger = logging.getLogger() + + +def configure_cmake(build_dir: str, generator=None): + + logger.info("Source: {}".format(PROJECT_ROOT)) + logger.info("Build : {}".format(build_dir)) + + os.chdir(PROJECT_ROOT) + os.makedirs(build_dir, exist_ok=True) + + cmake_cmd = ['cmake'] # TODO: get from toolchain + + if generator is not None: + cmake_cmd += ['-G', generator] + + cmake_cmd += [PROJECT_ROOT] + + exit_code = subprocess.call(cmake_cmd, cwd=build_dir) + if exit_code != 0: + logger.error('Failed to configure project') + sys.exit(exit_code) + + +def build_project(build_dir: str, generator=None, concurrency=None): + + if generator in ["make", None]: + cmd = ["make"] + elif generator in ["ninja"]: + cmd = ["ninja"] + + if concurrency is None: + concurrency = Project.get_concurrency() + + cmd.append('-j{}'.format(concurrency)) + + exit_code = subprocess.call(cmd, cwd=build_dir) + + if exit_code != 0: + logger.error('Failed to make the project') + sys.exit(exit_code) + + +def run_tests(build_dir: str, concurrency=None): + cmake_cmd = ['ctest'] # TODO: get from toolchain + + if concurrency is not None: + raise BaseException("No support for concurrent testing at the moment.") + + exit_code = subprocess.call(cmake_cmd, cwd=build_dir) + if exit_code != 0: + logger.error('Failed to configure project') + sys.exit(exit_code) + + +def main(build_dir: str, generator=None, test: bool = False): + + configure_cmake(build_dir, generator) + + build_project(build_dir, generator) + + if test: + run_tests(build_dir) diff --git a/src/QsPasses/scripts/FormatSource/__init__.py b/src/QsPasses/site-packages/PassCI/FormatSource/__init__.py similarity index 93% rename from src/QsPasses/scripts/FormatSource/__init__.py rename to src/QsPasses/site-packages/PassCI/FormatSource/__init__.py index f8f6e3929e..299a53cb70 100644 --- a/src/QsPasses/scripts/FormatSource/__init__.py +++ 
b/src/QsPasses/site-packages/PassCI/FormatSource/__init__.py @@ -6,14 +6,10 @@ import logging import subprocess - -def discover_formatter(): - # TODO(TFR): Auto discover, use full path - return "clang-format" - +from ..Project import PROJECT_ROOT +from ..ToolChain import discover_formatter logger = logging.getLogger() -PROJECT_ROOT = path.abspath(path.dirname(path.dirname(path.dirname(__file__)))) CLANG_FORMAT_EXE = discover_formatter() ####### @@ -96,7 +92,7 @@ def enforce_formatting(filename, contents, cursor, dry_run): }, { "name": "Scripts", - "src": path.join(PROJECT_ROOT, "scripts"), + "src": path.join(PROJECT_ROOT, "site-packages"), "pipelines": { "py": [ diff --git a/src/QsPasses/site-packages/PassCI/Project.py b/src/QsPasses/site-packages/PassCI/Project.py new file mode 100644 index 0000000000..ef4ba177cd --- /dev/null +++ b/src/QsPasses/site-packages/PassCI/Project.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from os import path +import multiprocessing + +PROJECT_ROOT = path.abspath(path.dirname(path.dirname(path.dirname(__file__)))) + +MAX_CONCURRENCY = 7 + + +def get_concurrency(): + return min(MAX_CONCURRENCY, multiprocessing.cpu_count()) diff --git a/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py b/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py new file mode 100644 index 0000000000..7cb807a69b --- /dev/null +++ b/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import shutil + + +def discover_formatter(): + return shutil.which("clang-format") diff --git a/src/QsPasses/site-packages/PassCI/__main__.py b/src/QsPasses/site-packages/PassCI/__main__.py new file mode 100644 index 0000000000..9a9e74fc01 --- /dev/null +++ b/src/QsPasses/site-packages/PassCI/__main__.py @@ -0,0 +1,68 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. + + +from .FormatSource import main as style_check_main +from .Builder import main as builder_main + +import click +import logging +import sys + +logger = logging.getLogger() + +# Logging configuration +ch = logging.StreamHandler() +ch.setLevel(logging.DEBUG) +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +logger.addHandler(ch) + +# By default we only log errors +logger.setLevel(logging.ERROR) + + +@click.group() +@click.option('--loglevel', default="error") +def cli(loglevel): + levels = { + "critical": 50, + "error": 40, + "warning": 30, + "info": 20, + "debug": 10, + "notset": 0 + } + + loglevel = loglevel.lower() + if loglevel not in levels: + logger.critical("Invalid log level") + sys.exit(-1) + + logger.setLevel(levels[loglevel]) + logger.info("Loglevel set to {}".format(loglevel)) + + +@cli.command() +@click.option('--fix-issues/--no-fix-issues', default=False) +def style_check(fix_issues): + logger.info("Invoking style checker") + + style_check_main() + + +@cli.command() +@click.option('--debug/--no-debug', default=True) +@click.option('--generator', default=None) +def test(debug, generator): + logger.info("Building and testing") + + build_dir = "Debug" + if not debug: + build_dir = "Release" + + builder_main(build_dir, generator, True) + + +if __name__ == '__main__': + cli() From de323ef36321ce600e7b638d6239f64669143412 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 11:28:59 +0200 Subject: [PATCH 005/106] Finishing V1 of CI script with updated clang tidy and format --- src/QsPasses/.clang-format | 81 ++++++------- src/QsPasses/.clang-tidy | 38 +++++- src/QsPasses/CMakeLists.txt | 2 + src/QsPasses/Makefile | 12 +- src/QsPasses/README.md | 6 +- .../{PassCI => TasksCI}/Builder/__init__.py | 0 .../FormatSource/__init__.py | 44 ++++--- .../site-packages/TasksCI/Linting/__init__.py | 110 ++++++++++++++++++ .../{PassCI => TasksCI}/Project.py | 0 .../{PassCI => TasksCI}/ToolChain/__init__.py | 4 + .../{PassCI => TasksCI}/__main__.py | 35 +++++- src/QsPasses/src/GateCounter/GateCounter.cpp | 81 +++++++------ 12 files changed, 304 insertions(+), 109 deletions(-) rename src/QsPasses/site-packages/{PassCI => TasksCI}/Builder/__init__.py (100%) rename src/QsPasses/site-packages/{PassCI => TasksCI}/FormatSource/__init__.py (73%) create mode 100644 src/QsPasses/site-packages/TasksCI/Linting/__init__.py rename src/QsPasses/site-packages/{PassCI => TasksCI}/Project.py (100%) rename src/QsPasses/site-packages/{PassCI => TasksCI}/ToolChain/__init__.py (74%) rename src/QsPasses/site-packages/{PassCI => TasksCI}/__main__.py (56%) diff --git a/src/QsPasses/.clang-format b/src/QsPasses/.clang-format index f44b5289b3..329e8f956d 100644 --- a/src/QsPasses/.clang-format +++ b/src/QsPasses/.clang-format @@ -1,57 +1,60 @@ +# https://clang.llvm.org/docs/ClangFormatStyleOptions.html + --- -BasedOnStyle: Google Language: Cpp +BasedOnStyle: Microsoft + +# page width +ColumnLimit: 120 +ReflowComments: true + +# tabs and indents +UseTab: Never +IndentWidth: 4 +TabWidth: 4 AccessModifierOffset: -2 +NamespaceIndentation: Inner + +# line and statements layout +BreakBeforeBraces: Allman +BinPackParameters: false +AlignAfterOpenBracket: AlwaysBreak +AllowShortIfStatementsOnASingleLine: WithoutElse +AllowShortFunctionsOnASingleLine: Empty +AllowAllConstructorInitializersOnNextLine: false +AllowAllArgumentsOnNextLine: true 
+AllowAllParametersOfDeclarationOnNextLine: false +BreakBeforeTernaryOperators: true +BreakConstructorInitializers: BeforeComma + +# misc +Cpp11BracedListStyle: true +FixNamespaceComments: true +IncludeBlocks: Preserve +SpaceBeforeInheritanceColon : true +SpaceBeforeParens: ControlStatements +DerivePointerAlignment: false +PointerAlignment: Left + +# Suggestion +Standard: Cpp11 AlignConsecutiveAssignments: true AlignConsecutiveDeclarations: true AlignTrailingComments: true -AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: None -AllowShortIfStatementsOnASingleLine: false -AllowShortLoopsOnASingleLine: false -BraceWrapping: - AfterClass: true - AfterControlStatement: true - AfterEnum: true - AfterFunction: true - AfterNamespace: false - AfterStruct: true - AfterUnion: true - AfterExternBlock: true - BeforeCatch: true - BeforeElse: true - SplitEmptyFunction: false -BreakBeforeBraces: Custom -BreakConstructorInitializers: BeforeComma -ColumnLimit: 100 -ConstructorInitializerAllOnOneLineOrOnePerLine: false -ConstructorInitializerIndentWidth: 2 -ContinuationIndentWidth: 4 -DerivePointerAlignment: false -IncludeBlocks: Regroup -IncludeCategories: + +# Ensures include compleness +IncludeCategories: - Regex: '.*\.\..*' Priority: 1 - Regex: '^<.*\.h.*>$' Priority: 5 - Regex: '^<.*>$' Priority: 6 - - Regex: '^"(gtest)|(gmock)|(benchmark)/.*"$' + - Regex: '^"(llvm)/.*"$' Priority: 4 - Regex: '.*/.*' Priority: 3 - Regex: '.*' Priority: 2 IncludeIsMainRegex: '' -IndentCaseLabels: false -IndentWidth: 2 -KeepEmptyLinesAtTheStartOfBlocks: true -MaxEmptyLinesToKeep: 1 -NamespaceIndentation: None -PointerAlignment: Right -SortIncludes: true -SortUsingDeclarations: true -SpaceInEmptyParentheses: false -SpacesInAngles: false -Standard: Cpp11 -UseTab: Never + diff --git a/src/QsPasses/.clang-tidy b/src/QsPasses/.clang-tidy index f86dcb3ccf..d1f58c04c2 100644 --- a/src/QsPasses/.clang-tidy +++ b/src/QsPasses/.clang-tidy @@ -1,5 +1,7 @@ -Checks: "-*,\ 
-bugprone-*,\ +Checks: "-*,bugprone-*,\ +-readability-*,\ +readability-identifier-*,\ +readability-braces-around-statements,\ cert-dcl*,\ cert-env*,\ cert-err52-cpp,\ @@ -16,7 +18,31 @@ hicpp-explicit-conversions,\ hicpp-use-*,\ misc-*,\ -misc-misplaced-widening-cast,\ -modernize-*,\ -performance-*,\ -readability-*,\ --readability-identifier-naming" \ No newline at end of file +performance-*" + +WarningsAsErrors: '*' +HeaderFilterRegex: '.*' + +CheckOptions: + - key: readability-identifier-naming.ClassCase + value: 'CamelCase' + - key: readability-identifier-naming.ClassPrefix + value: 'C' + - key: readability-identifier-naming.AbstractClassPrefix + value: 'I' + - key: readability-identifier-naming.StructCase + value: 'CamelCase' + - key: readability-identifier-naming.ParameterCase + value: 'camelBack' + - key: readability-identifier-naming.PrivateMemberCase + value: 'camelBack' + - key: readability-identifier-naming.LocalVariableCase + value: 'camelBack' + - key: readability-identifier-naming.TypeAliasCase + value: 'CamelCase' + - key: readability-identifier-naming.UnionCase + value: 'CamelCase' + - key: readability-identifier-naming.FunctionCase + value: 'CamelCase' + - key: readability-identifier-naming.NamespaceCase + value: 'CamelCase' diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index eb5eb53301..4135da0dd6 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -14,6 +14,8 @@ set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wconversion -Wpedantic") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") +# Needed for clang-tidy +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) include_directories(${LLVM_INCLUDE_DIRS}) add_definitions(${LLVM_DEFINITIONS}) diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile index 324c136291..0a281789aa 100644 --- a/src/QsPasses/Makefile +++ b/src/QsPasses/Makefile @@ -1,14 +1,18 @@ stylecheck: - @source develop.env && python -m PassCI --stylecheck - + 
@source develop.env && python -m TasksCI stylecheck +fixstyle: + @source develop.env && python -m TasksCI --loglevel warning stylecheck --fix-issues lint: - @echo "Static analysis not added yet" + @source develop.env && python -m TasksCI lint tests: - @source develop.env && python -m PassCI test + @source develop.env && python -m TasksCI test + clean: rm -rf Release/ rm -rf Debug/ + +runci: stylecheck lint tests clean diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 65f9ef8206..b9a2728355 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -58,7 +58,7 @@ source develop__venv/bin/activate pip install -r requirements.txt ``` -These adds the necessary environment variables to ensure that you have the `PassCI` package and all required dependencies. +These adds the necessary environment variables to ensure that you have the `TasksCI` package and all required dependencies. To check the style, run @@ -80,3 +80,7 @@ make lint As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended that you use a docker image to perform these steps. 
+ +# TODOs + +Look at https://github.com/llvm-mirror/clang-tools-extra/blob/master/clang-tidy/tool/run-clang-tidy.py diff --git a/src/QsPasses/site-packages/PassCI/Builder/__init__.py b/src/QsPasses/site-packages/TasksCI/Builder/__init__.py similarity index 100% rename from src/QsPasses/site-packages/PassCI/Builder/__init__.py rename to src/QsPasses/site-packages/TasksCI/Builder/__init__.py diff --git a/src/QsPasses/site-packages/PassCI/FormatSource/__init__.py b/src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py similarity index 73% rename from src/QsPasses/site-packages/PassCI/FormatSource/__init__.py rename to src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py index 299a53cb70..13f5a4e72b 100644 --- a/src/QsPasses/site-packages/PassCI/FormatSource/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py @@ -5,18 +5,19 @@ import os import logging import subprocess +import sys from ..Project import PROJECT_ROOT from ..ToolChain import discover_formatter -logger = logging.getLogger() +logger = logging.getLogger("FormatChecker") CLANG_FORMAT_EXE = discover_formatter() ####### # Style pipeline components -def require_token(token, filename, contents, cursor, dry_run): +def require_token(token, filename, contents, cursor, fix_issues): failed = False if not contents[cursor:].startswith(token): logger.error("{}: File must have {} at position {}".format(filename, token, cursor)) @@ -24,23 +25,23 @@ def require_token(token, filename, contents, cursor, dry_run): return cursor + len(token), failed -def require_pragma_once(filename, contents, cursor, dry_run): - return require_token("#pragma once\n", filename, contents, cursor, dry_run) +def require_pragma_once(filename, contents, cursor, fix_issues): + return require_token("#pragma once\n", filename, contents, cursor, fix_issues) -def require_todo_owner(filename, contents, cursor, dry_run): +def require_todo_owner(filename, contents, cursor, fix_issues): # TODO(tfr): implement return 
cursor, False -def enforce_cpp_license(filename, contents, cursor, dry_run): +def enforce_cpp_license(filename, contents, cursor, fix_issues): return require_token("""// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -""", filename, contents, cursor, dry_run) +""", filename, contents, cursor, fix_issues) -def enforce_py_license(filename, contents, cursor, dry_run): +def enforce_py_license(filename, contents, cursor, fix_issues): # Allowing empty files if contents.strip() == "": return cursor, False @@ -48,10 +49,10 @@ def enforce_py_license(filename, contents, cursor, dry_run): return require_token("""# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -""", filename, contents, cursor, dry_run) +""", filename, contents, cursor, fix_issues) -def enforce_formatting(filename, contents, cursor, dry_run): +def enforce_formatting(filename, contents, cursor, fix_issues): p = subprocess.Popen( [CLANG_FORMAT_EXE, '-style=file'], stdout=subprocess.PIPE, @@ -64,7 +65,16 @@ def enforce_formatting(filename, contents, cursor, dry_run): formatted = output.decode('utf-8') if formatted != contents: + + # Updating the contents of the file + if fix_issues: + logger.info("Formatting {}".format(filename)) + with open(filename, "w") as filebuffer: + filebuffer.write(formatted) + return cursor, False + logger.error("{} was not correctly formatted.".format(filename)) + return cursor, True return cursor, False @@ -103,7 +113,7 @@ def enforce_formatting(filename, contents, cursor, dry_run): ] -def execute_pipeline(pipeline, filename: str, dry_run: bool): +def execute_pipeline(pipeline, filename: str, fix_issues: bool): logger.info("Executing pipeline for {}".format(filename)) cursor = 0 @@ -112,13 +122,13 @@ def execute_pipeline(pipeline, filename: str, dry_run: bool): failed = False for fnc in pipeline: - cursor, f = fnc(filename, contents, cursor, dry_run) + cursor, f = fnc(filename, contents, cursor, 
fix_issues) failed = failed or f return failed -def main(dry_run: bool = True): +def main(fix_issues: bool = False): failed = False for language in AUTO_FORMAT_LANGUAGES: @@ -134,15 +144,13 @@ def main(dry_run: bool = True): _, ext = filename.rsplit(".", 1) if ext in pipelines: - f = execute_pipeline(pipelines[ext], path.join(root, filename), dry_run) + f = execute_pipeline(pipelines[ext], path.join(root, filename), fix_issues) failed = failed or f if failed: logger.error("Your code did not pass formatting.") - - return failed + sys.exit(-1) if __name__ == "__main__": - if main(): - exit(-1) + main() diff --git a/src/QsPasses/site-packages/TasksCI/Linting/__init__.py b/src/QsPasses/site-packages/TasksCI/Linting/__init__.py new file mode 100644 index 0000000000..538a0bfa07 --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/Linting/__init__.py @@ -0,0 +1,110 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import logging +from ..Builder import configure_cmake, build_project +from .. 
import ToolChain +from ..Project import PROJECT_ROOT +import os +import subprocess +import sys + +logger = logging.getLogger("Linter") + + +def clang_tidy_diagnose(): + config = subprocess.check_output( + [ToolChain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() + + check_list = subprocess.check_output( + [ToolChain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() + + checks = [x.strip() for x in check_list.split("\n") if '-' in x] + + print("Working directory: {}".format(PROJECT_ROOT)) + print("") + print(config) + print("") + print("Clang tidy checks:") + for check in sorted(checks): + print(" -", check) + + +def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): + clang_tidy_binary = ToolChain.discover_tidy() + + cmd = [clang_tidy_binary] + output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) + + cmd.append('-header-filter=".*(QsPasses)\\/(src).*\\.hpp$"') + cmd.append('-p=' + build_dir) + cmd.append('-export-fixes={}'.format(output_file)) + cmd.append('--use-color') + + if fix_issues: + cmd.append("-fix") + + cmd.append(filename) + + p = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=PROJECT_ROOT) + + output, err = p.communicate() + + if p.returncode != 0: + output = output.decode() + err = err.decode() + + if "error" in err: + # TODO(TFR): write output and errors to temp log file + sys.stderr.write(output) + sys.stderr.write(err) + + logger.error("{} failed static analysis".format(filename)) + return False + + logger.info("All good!") + return True + + +def main_cpp(fix_issues: bool): + logger.info("Linting") + build_dir = os.path.join(PROJECT_ROOT, "Debug") + source_dir = os.path.join(PROJECT_ROOT, "src") + generator = None + extensions = ["cpp"] + + # Configuring CMake + configure_cmake(build_dir, generator) + + # Building + build_project(build_dir, generator) + + # Generating list of files + # TODO(TFR): Ensure that it is 
only those which were changed that are + # analysed + files_to_analyse = [] + + for root, dirs, files in os.walk(source_dir): + + for filename in files: + if "." not in filename: + continue + + _, ext = filename.rsplit(".", 1) + if ext in extensions: + files_to_analyse.append(os.path.join(root, filename)) + + success = True + for filename in files_to_analyse: + success = success and run_clang_tidy(source_dir, build_dir, filename, fix_issues=fix_issues) + return success + + +def main(fix_issues: bool): + if not main_cpp(fix_issues): + sys.exit(-1) diff --git a/src/QsPasses/site-packages/PassCI/Project.py b/src/QsPasses/site-packages/TasksCI/Project.py similarity index 100% rename from src/QsPasses/site-packages/PassCI/Project.py rename to src/QsPasses/site-packages/TasksCI/Project.py diff --git a/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py b/src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py similarity index 74% rename from src/QsPasses/site-packages/PassCI/ToolChain/__init__.py rename to src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py index 7cb807a69b..f666565860 100644 --- a/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py @@ -6,3 +6,7 @@ def discover_formatter(): return shutil.which("clang-format") + + +def discover_tidy(): + return shutil.which("clang-tidy") diff --git a/src/QsPasses/site-packages/PassCI/__main__.py b/src/QsPasses/site-packages/TasksCI/__main__.py similarity index 56% rename from src/QsPasses/site-packages/PassCI/__main__.py rename to src/QsPasses/site-packages/TasksCI/__main__.py index 9a9e74fc01..947c49bf7b 100644 --- a/src/QsPasses/site-packages/PassCI/__main__.py +++ b/src/QsPasses/site-packages/TasksCI/__main__.py @@ -4,6 +4,7 @@ from .FormatSource import main as style_check_main from .Builder import main as builder_main +from .Linting import main as lint_main, clang_tidy_diagnose import click import logging @@ -44,11 +45,39 @@ def 
cli(loglevel): @cli.command() -@click.option('--fix-issues/--no-fix-issues', default=False) -def style_check(fix_issues): +@click.option('--fix-issues', default=False, is_flag=True) +def stylecheck(fix_issues): logger.info("Invoking style checker") - style_check_main() + style_check_main(fix_issues) + + +@cli.command() +@click.option("--diagnose", default=False, is_flag=True) +@click.option('--fix-issues', default=False, is_flag=True) +@click.option('--force', default=False, is_flag=True) +def lint(diagnose, fix_issues, force): + if diagnose: + clang_tidy_diagnose() + return + + if fix_issues: + if not force: + print("""Fixing isssues using Clang Tidy will break your code. +Make sure that you have committed your changes BEFORE DOING THIS. +Even so, this feature is experimental and there have been reports of +clang-tidy modying system libraries - therefore, USE THIS FEATURE AT +YOUR OWN RISK. + +Write 'I understand' to proceed.""") + print(":") + x = input() + if x.lower() != "i understand": + print("Wrong answer - stopping!") + exit(-1) + + logger.info("Invoking linter") + lint_main(fix_issues) @cli.command() diff --git a/src/QsPasses/src/GateCounter/GateCounter.cpp b/src/QsPasses/src/GateCounter/GateCounter.cpp index 540a85e626..194f49ab06 100644 --- a/src/QsPasses/src/GateCounter/GateCounter.cpp +++ b/src/QsPasses/src/GateCounter/GateCounter.cpp @@ -5,60 +5,65 @@ using namespace llvm; -namespace { +namespace +{ -void visitor(Function &F) +void Visitor(Function& f) { - errs() << "(gate-counter) " << F.getName() << "\n"; - errs() << "(gate-counter) number of arguments: " << F.arg_size() << "\n"; + errs() << "(gate-counter) " << f.getName() << "\n"; + errs() << "(gate-counter) number of arguments: " << f.arg_size() << "\n"; } struct GateCounterPass : PassInfoMixin { - PreservedAnalyses run(Function &F, FunctionAnalysisManager &) - { - visitor(F); + static auto run(Function& f, FunctionAnalysisManager& /*unused*/) -> PreservedAnalyses // NOLINT + { + Visitor(f); - 
return PreservedAnalyses::all(); - } + return PreservedAnalyses::all(); + } }; -struct LegacyGateCounterPass : public FunctionPass +class CLegacyGateCounterPass : public FunctionPass { - static char ID; - LegacyGateCounterPass() - : FunctionPass(ID) - {} + public: + static char ID; + CLegacyGateCounterPass() + : FunctionPass(ID) + { + } - bool runOnFunction(Function &F) override - { - visitor(F); - return false; - } + auto runOnFunction(Function& f) -> bool override + { + Visitor(f); + return false; + } }; -} // namespace +} // namespace -llvm::PassPluginLibraryInfo getGateCounterPluginInfo() +auto GetGateCounterPluginInfo() -> llvm::PassPluginLibraryInfo { - return {LLVM_PLUGIN_API_VERSION, "GateCounter", LLVM_VERSION_STRING, [](PassBuilder &PB) { - PB.registerPipelineParsingCallback([](StringRef Name, FunctionPassManager &FPM, - ArrayRef) { - if (Name == "gate-counter") - { - FPM.addPass(GateCounterPass()); - return true; - } - return false; - }); - }}; + return {LLVM_PLUGIN_API_VERSION, "GateCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) { + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { + if (name == "gate-counter") + { + fpm.addPass(GateCounterPass()); + return true; + } + return false; + }); + }}; } -extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +extern "C" LLVM_ATTRIBUTE_WEAK auto llvmGetPassPluginInfo() -> ::llvm::PassPluginLibraryInfo { - return getGateCounterPluginInfo(); + return GetGateCounterPluginInfo(); } -char LegacyGateCounterPass::ID = 0; -static RegisterPass LegacyGateCounterRegistration("legacy-gate-counter", - "Gate Counter Pass", true, - false); +char CLegacyGateCounterPass::ID = 0; +static RegisterPass LegacyGateCounterRegistration( + "legacy-gate-counter", + "Gate Counter Pass", + true, + false); From cfb4b9354459448037d32778f7787d35cc7b41a3 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 11:34:25 +0200 Subject: [PATCH 006/106] Refactoring CI module --- src/QsPasses/site-packages/TasksCI/__main__.py | 6 +++--- .../TasksCI/{Builder/__init__.py => builder.py} | 6 +++--- .../{FormatSource/__init__.py => formatting.py} | 4 ++-- .../TasksCI/{Linting/__init__.py => linting.py} | 12 ++++++------ .../TasksCI/{Project.py => settings.py} | 0 .../TasksCI/{ToolChain/__init__.py => toolchain.py} | 0 6 files changed, 14 insertions(+), 14 deletions(-) rename src/QsPasses/site-packages/TasksCI/{Builder/__init__.py => builder.py} (94%) rename src/QsPasses/site-packages/TasksCI/{FormatSource/__init__.py => formatting.py} (98%) rename src/QsPasses/site-packages/TasksCI/{Linting/__init__.py => linting.py} (90%) rename src/QsPasses/site-packages/TasksCI/{Project.py => settings.py} (100%) rename src/QsPasses/site-packages/TasksCI/{ToolChain/__init__.py => toolchain.py} (100%) diff --git a/src/QsPasses/site-packages/TasksCI/__main__.py b/src/QsPasses/site-packages/TasksCI/__main__.py index 947c49bf7b..69e83e40ef 100644 --- a/src/QsPasses/site-packages/TasksCI/__main__.py +++ b/src/QsPasses/site-packages/TasksCI/__main__.py @@ -2,9 +2,9 @@ # Licensed under the MIT License. 
-from .FormatSource import main as style_check_main -from .Builder import main as builder_main -from .Linting import main as lint_main, clang_tidy_diagnose +from .formatting import main as style_check_main +from .builder import main as builder_main +from .linting import main as lint_main, clang_tidy_diagnose import click import logging diff --git a/src/QsPasses/site-packages/TasksCI/Builder/__init__.py b/src/QsPasses/site-packages/TasksCI/builder.py similarity index 94% rename from src/QsPasses/site-packages/TasksCI/Builder/__init__.py rename to src/QsPasses/site-packages/TasksCI/builder.py index 9b77c7035d..329f1cbb32 100644 --- a/src/QsPasses/site-packages/TasksCI/Builder/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/builder.py @@ -2,8 +2,8 @@ # Licensed under the MIT License. import os -from .. import Project -from ..Project import PROJECT_ROOT +from . import settings +from .settings import PROJECT_ROOT import logging import subprocess import sys @@ -40,7 +40,7 @@ def build_project(build_dir: str, generator=None, concurrency=None): cmd = ["ninja"] if concurrency is None: - concurrency = Project.get_concurrency() + concurrency = settings.get_concurrency() cmd.append('-j{}'.format(concurrency)) diff --git a/src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py b/src/QsPasses/site-packages/TasksCI/formatting.py similarity index 98% rename from src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py rename to src/QsPasses/site-packages/TasksCI/formatting.py index 13f5a4e72b..9cf87c609b 100644 --- a/src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/formatting.py @@ -7,8 +7,8 @@ import subprocess import sys -from ..Project import PROJECT_ROOT -from ..ToolChain import discover_formatter +from .settings import PROJECT_ROOT +from .toolchain import discover_formatter logger = logging.getLogger("FormatChecker") CLANG_FORMAT_EXE = discover_formatter() diff --git 
a/src/QsPasses/site-packages/TasksCI/Linting/__init__.py b/src/QsPasses/site-packages/TasksCI/linting.py similarity index 90% rename from src/QsPasses/site-packages/TasksCI/Linting/__init__.py rename to src/QsPasses/site-packages/TasksCI/linting.py index 538a0bfa07..6f12f49d3c 100644 --- a/src/QsPasses/site-packages/TasksCI/Linting/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/linting.py @@ -2,9 +2,9 @@ # Licensed under the MIT License. import logging -from ..Builder import configure_cmake, build_project -from .. import ToolChain -from ..Project import PROJECT_ROOT +from .builder import configure_cmake, build_project +from . import toolchain +from .settings import PROJECT_ROOT import os import subprocess import sys @@ -14,10 +14,10 @@ def clang_tidy_diagnose(): config = subprocess.check_output( - [ToolChain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() + [toolchain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() check_list = subprocess.check_output( - [ToolChain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() + [toolchain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() checks = [x.strip() for x in check_list.split("\n") if '-' in x] @@ -31,7 +31,7 @@ def clang_tidy_diagnose(): def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): - clang_tidy_binary = ToolChain.discover_tidy() + clang_tidy_binary = toolchain.discover_tidy() cmd = [clang_tidy_binary] output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) diff --git a/src/QsPasses/site-packages/TasksCI/Project.py b/src/QsPasses/site-packages/TasksCI/settings.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/Project.py rename to src/QsPasses/site-packages/TasksCI/settings.py diff --git a/src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py b/src/QsPasses/site-packages/TasksCI/toolchain.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py 
rename to src/QsPasses/site-packages/TasksCI/toolchain.py From d8949bfd38141b7796129b54b479fe5380e5e9ca Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 21 Jul 2021 14:16:26 +0200 Subject: [PATCH 007/106] Refactoring --- src/QsPasses/CMakeLists.txt | 6 +-- src/QsPasses/README.md | 14 ++++++- .../examples/ClassicalIrCommandline/Makefile | 15 ++++++++ .../examples/ClassicalIrCommandline/README.md | 36 ++++++++++++++++++ .../classical-program.bc | Bin 0 -> 2416 bytes .../classical-program.c | 17 +++++++++ src/QsPasses/src/Llvm.hpp | 6 ++- .../OpsCounter.cpp} | 30 ++++++++------- src/QsPasses/src/OpsCounter/OpsCounter.hpp | 7 ++++ 9 files changed, 112 insertions(+), 19 deletions(-) create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/Makefile create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/README.md create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.c rename src/QsPasses/src/{GateCounter/GateCounter.cpp => OpsCounter/OpsCounter.cpp} (52%) create mode 100644 src/QsPasses/src/OpsCounter/OpsCounter.hpp diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 4135da0dd6..6cfd104282 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -11,8 +11,8 @@ message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wconversion -Wpedantic") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") # Needed for clang-tidy set(CMAKE_EXPORT_COMPILE_COMMANDS ON) @@ -27,6 +27,6 @@ if(NOT LLVM_ENABLE_RTTI) endif() 
# The main libary -add_library(QSharpPasses SHARED src/GateCounter/GateCounter.cpp) +add_library(QSharpPasses SHARED src/OpsCounter/OpsCounter.cpp) target_link_libraries(QSharpPasses "$<$:-undefined dynamic_lookup>") diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index b9a2728355..0472337a67 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -42,7 +42,13 @@ make [target] ## Running a pass -Yet to be written +You can run a pass using `opt` as follows: + +```sh +opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc +``` + +For a gentle introduction, see examples. ## CI @@ -78,6 +84,12 @@ Finally, to analyse the code, run make lint ``` +You can run all processes by running: + +```sh +make runci +``` + As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended that you use a docker image to perform these steps. 
diff --git a/src/QsPasses/examples/ClassicalIrCommandline/Makefile b/src/QsPasses/examples/ClassicalIrCommandline/Makefile new file mode 100644 index 0000000000..b69052cb63 --- /dev/null +++ b/src/QsPasses/examples/ClassicalIrCommandline/Makefile @@ -0,0 +1,15 @@ +emit-llvm: + clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll + +emit-llvm-bc: + clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc + + +debug-ng-pass-mac: emit-llvm-bc + opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib --passes="operation-counter" -disable-output classical-program.bc + + + +clean: + rm -f classical-program.ll + rm -f classical-program.bc \ No newline at end of file diff --git a/src/QsPasses/examples/ClassicalIrCommandline/README.md b/src/QsPasses/examples/ClassicalIrCommandline/README.md new file mode 100644 index 0000000000..b293fc6b5c --- /dev/null +++ b/src/QsPasses/examples/ClassicalIrCommandline/README.md @@ -0,0 +1,36 @@ +# Emitting classical IRs + +This example demonstrates how to emit a classical IR and run a custom +pass on it. The purpose of this example is to teach the user how to apply +a pass to a IR using commandline tools only. + +IRs can be represented either by a human readible language or through bytecode. For +C programs former is generated by + +```sh + clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll +``` + +where as the latter is generated writing: + +```sh + clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc +``` + +This generates a nice and short IR which makes not too overwhelming to understand what is going on. + +## Legacy passes + +This part assumes that you have build the QsPasses library. + +```sh +opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-counter -analyze classical-program.ll +``` + +## Next-gen passes + +This part assumes that you have build the QsPasses library. 
+ +```sh +opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc +``` diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc new file mode 100644 index 0000000000000000000000000000000000000000..81f5c5224dc790063ec2c7aa487e9a61e44edde2 GIT binary patch literal 2416 zcmZ`*Z%kX)6~D%YYruYX0JUa$etiz<=CuudIGJl=vjH#7oDOM5%LgWE$*F!hChK2p7`)y( zn9y4~Y-+=gS}cP}r?hP9I@3lL^aIx$E*91Ab@1ZZB>yLcWiVlQ#{5jG=W6!CHGa7W zdf(s?(!yvtrf%jhzNTtFb}Iqhc49Hr(KeJ^XiX%B^vC@ww@ZcIJR(~(Xt-Iv42Pf% z8xVRM#-q;fJo*51J;IKCyRO4tr&awHJac_}D^Xy#@mmgp&i|m@rZ+->X^sAg-L1mFr(Mf|>r-PvMSW89jX-CX2Q*o;Fd za#g}IC`l2&8ArWJznMrG5={|b8oRYCil;uyF_juqm9_VBY}Lc<#I*M^#x=!LAm1~w z9pLmCu6W`j>qA0(*qet0T1=Sl$nWkQ=>zz5px_yc!50O)j;{LuCM`5UmnOR-`Jt>(l^jYE?CXz`~_9 zokxaJ3(epD;fhL}Z!;%+W(dZ65R+fS`bo9SE^o1u9g4oBXiF3p24zg+nV!?^A>DmF z^H63^Y6dxg>&J`iq|jK~7C%~2_7%mx1!Z3;ybzG^6*UILN)nDrN}2_pbe4`V2PtMt z(9HuQ#tnD{CS%^iO$u+=KaAVUV(%TL9m`A+7iY znW9c|Lwa!VQ6s1g$z)Vb<_v&65=qCNdr)N_Y%>4IF$aQjtO&STq)>r=-NUY$*yR{J z`KEg*v9~DprfdBN1=|hLmQ>nr6rP7mzn-^$sAVWGo z{s~}E_jebwlVY~}bz3}h0ABEBc>7R@xVt8HJ;tqX!mBZUq2MMB!?vt=`exi-avgu{ zh?|7MIGi3HdYIDD2yj26#b@7S+*mUHT)|O;O~$IH>HKSPTd82rihU4}uk(%_(Vm29 zVd0N0;3)}#Jx~nI*q4r7*w!-#IcC?zd?|D--5ua|ww|nP88^THXQw?I@4FK}nkd*7 z#NJZD@xfAWu>d|gO3piT@M_$64gxqrI{&i59JqD&Hv!qYT|u|Y>;CCtX7tRqz`*p& z&8EL@GCMBjksxlZnP~Z~Dr%N{&VKsmf(??DEj(WlPnX2gN%1szT^7HOB0NG*1EzeO zlNXbk&Ssy*$#=yTu3h`&Oh#|M=;*Nw{wig9+or#SUK&)O(74y*8}&~`CtT-5erO^% zae4f*XXX_?IN|e8&Cc*WJ>5Os0_sxdgn6XzQup`FBg09}8COX#U^j zj1~$D5c<+Rs7_j+WI-i`K9rO66JySFF#D{IxQR}G_9!a`9abnstKI;#| lg3v7hO8ow*anI!wl5h4EpJ%#zd=w6k!s$_ZoGSaT_HTfd7o-3H literal 0 HcmV?d00001 diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.c b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.c new file mode 100644 index 0000000000..be19ee2fcc --- /dev/null +++ 
b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.c @@ -0,0 +1,17 @@ +int foo(int x) +{ + return x; +} + +void bar(int x, int y) +{ + foo(x + y); +} + +int main() +{ + foo(2); + bar(3, 2); + + return 0; +} \ No newline at end of file diff --git a/src/QsPasses/src/Llvm.hpp b/src/QsPasses/src/Llvm.hpp index 408ad61cdf..cbff875717 100644 --- a/src/QsPasses/src/Llvm.hpp +++ b/src/QsPasses/src/Llvm.hpp @@ -10,6 +10,8 @@ #pragma GCC diagnostic ignored "-Wsign-compare" #pragma GCC diagnostic ignored "-Wunknown-warning-option" #pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wall" +#pragma GCC diagnostic ignored "-Weverything" #endif #if defined(__clang__) @@ -21,6 +23,8 @@ #pragma clang diagnostic ignored "-Wreturn-std-move" #pragma clang diagnostic ignored "-Wunknown-warning-option" #pragma clang diagnostic ignored "-Wunused-parameter" +#pragma clang diagnostic ignored "-Wall" +#pragma clang diagnostic ignored "-Weverything" #endif #include "llvm/IR/LegacyPassManager.h" @@ -34,4 +38,4 @@ #if defined(__GNUC__) #pragma GCC diagnostic pop -#endif \ No newline at end of file +#endif diff --git a/src/QsPasses/src/GateCounter/GateCounter.cpp b/src/QsPasses/src/OpsCounter/OpsCounter.cpp similarity index 52% rename from src/QsPasses/src/GateCounter/GateCounter.cpp rename to src/QsPasses/src/OpsCounter/OpsCounter.cpp index 194f49ab06..b9bbc1469a 100644 --- a/src/QsPasses/src/GateCounter/GateCounter.cpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.cpp @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
+#include "OpsCounter/OpsCounter.hpp" + #include "Llvm.hpp" using namespace llvm; @@ -10,11 +12,11 @@ namespace void Visitor(Function& f) { - errs() << "(gate-counter) " << f.getName() << "\n"; - errs() << "(gate-counter) number of arguments: " << f.arg_size() << "\n"; + errs() << "(operation-counter) " << f.getName() << "\n"; + errs() << "(operation-counter) number of arguments: " << f.arg_size() << "\n"; } -struct GateCounterPass : PassInfoMixin +struct OpsCounterPass : PassInfoMixin { static auto run(Function& f, FunctionAnalysisManager& /*unused*/) -> PreservedAnalyses // NOLINT { @@ -24,11 +26,11 @@ struct GateCounterPass : PassInfoMixin } }; -class CLegacyGateCounterPass : public FunctionPass +class CLegacyOpsCounterPass : public FunctionPass { public: static char ID; - CLegacyGateCounterPass() + CLegacyOpsCounterPass() : FunctionPass(ID) { } @@ -41,14 +43,14 @@ class CLegacyGateCounterPass : public FunctionPass }; } // namespace -auto GetGateCounterPluginInfo() -> llvm::PassPluginLibraryInfo +llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { - return {LLVM_PLUGIN_API_VERSION, "GateCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) { + return {LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) { pb.registerPipelineParsingCallback( [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { - if (name == "gate-counter") + if (name == "operation-counter") { - fpm.addPass(GateCounterPass()); + fpm.addPass(OpsCounterPass()); return true; } return false; @@ -56,14 +58,14 @@ auto GetGateCounterPluginInfo() -> llvm::PassPluginLibraryInfo }}; } -extern "C" LLVM_ATTRIBUTE_WEAK auto llvmGetPassPluginInfo() -> ::llvm::PassPluginLibraryInfo +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return GetGateCounterPluginInfo(); + return GetOpsCounterPluginInfo(); } -char CLegacyGateCounterPass::ID = 0; -static RegisterPass LegacyGateCounterRegistration( - "legacy-gate-counter", +char 
CLegacyOpsCounterPass::ID = 0; +static RegisterPass LegacyOpsCounterRegistration( + "legacy-operation-counter", "Gate Counter Pass", true, false); diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/src/OpsCounter/OpsCounter.hpp new file mode 100644 index 0000000000..a26cc5e757 --- /dev/null +++ b/src/QsPasses/src/OpsCounter/OpsCounter.hpp @@ -0,0 +1,7 @@ +#pragma once +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include "Llvm.hpp" + +auto GetOpsCounterPluginInfo() -> llvm::PassPluginLibraryInfo; From cc1a0e6270488712d500c84f8b49304616f8faef Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 21 Jul 2021 14:16:47 +0200 Subject: [PATCH 008/106] Removing binary IR --- .../ClassicalIrCommandline/classical-program.bc | Bin 2416 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc deleted file mode 100644 index 81f5c5224dc790063ec2c7aa487e9a61e44edde2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2416 zcmZ`*Z%kX)6~D%YYruYX0JUa$etiz<=CuudIGJl=vjH#7oDOM5%LgWE$*F!hChK2p7`)y( zn9y4~Y-+=gS}cP}r?hP9I@3lL^aIx$E*91Ab@1ZZB>yLcWiVlQ#{5jG=W6!CHGa7W zdf(s?(!yvtrf%jhzNTtFb}Iqhc49Hr(KeJ^XiX%B^vC@ww@ZcIJR(~(Xt-Iv42Pf% z8xVRM#-q;fJo*51J;IKCyRO4tr&awHJac_}D^Xy#@mmgp&i|m@rZ+->X^sAg-L1mFr(Mf|>r-PvMSW89jX-CX2Q*o;Fd za#g}IC`l2&8ArWJznMrG5={|b8oRYCil;uyF_juqm9_VBY}Lc<#I*M^#x=!LAm1~w z9pLmCu6W`j>qA0(*qet0T1=Sl$nWkQ=>zz5px_yc!50O)j;{LuCM`5UmnOR-`Jt>(l^jYE?CXz`~_9 zokxaJ3(epD;fhL}Z!;%+W(dZ65R+fS`bo9SE^o1u9g4oBXiF3p24zg+nV!?^A>DmF z^H63^Y6dxg>&J`iq|jK~7C%~2_7%mx1!Z3;ybzG^6*UILN)nDrN}2_pbe4`V2PtMt z(9HuQ#tnD{CS%^iO$u+=KaAVUV(%TL9m`A+7iY znW9c|Lwa!VQ6s1g$z)Vb<_v&65=qCNdr)N_Y%>4IF$aQjtO&STq)>r=-NUY$*yR{J 
z`KEg*v9~DprfdBN1=|hLmQ>nr6rP7mzn-^$sAVWGo z{s~}E_jebwlVY~}bz3}h0ABEBc>7R@xVt8HJ;tqX!mBZUq2MMB!?vt=`exi-avgu{ zh?|7MIGi3HdYIDD2yj26#b@7S+*mUHT)|O;O~$IH>HKSPTd82rihU4}uk(%_(Vm29 zVd0N0;3)}#Jx~nI*q4r7*w!-#IcC?zd?|D--5ua|ww|nP88^THXQw?I@4FK}nkd*7 z#NJZD@xfAWu>d|gO3piT@M_$64gxqrI{&i59JqD&Hv!qYT|u|Y>;CCtX7tRqz`*p& z&8EL@GCMBjksxlZnP~Z~Dr%N{&VKsmf(??DEj(WlPnX2gN%1szT^7HOB0NG*1EzeO zlNXbk&Ssy*$#=yTu3h`&Oh#|M=;*Nw{wig9+or#SUK&)O(74y*8}&~`CtT-5erO^% zae4f*XXX_?IN|e8&Cc*WJ>5Os0_sxdgn6XzQup`FBg09}8COX#U^j zj1~$D5c<+Rs7_j+WI-i`K9rO66JySFF#D{IxQR}G_9!a`9abnstKI;#| lg3v7hO8ow*anI!wl5h4EpJ%#zd=w6k!s$_ZoGSaT_HTfd7o-3H From 1a98e31d49231a7c1154d9b91614dd93e8586953 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 21 Jul 2021 14:17:50 +0200 Subject: [PATCH 009/106] Updating gitignore --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 77a2c5708e..e0f3d8dcec 100644 --- a/.gitignore +++ b/.gitignore @@ -306,6 +306,9 @@ paket-files/ __pycache__/ *.pyc +# Python Virtual environments +*__venv/ + # Cake - Uncomment if you are using it # tools/** # !tools/packages.config From 74a492457c98d302243735c2ba84d29786a0dae6 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 14:40:38 +0200 Subject: [PATCH 010/106] Creating root tool for performing CI tasks --- src/QsPasses/Makefile | 14 --- src/QsPasses/develop.env | 3 - src/QsPasses/manage | 10 ++ .../site-packages/TasksCI/__main__.py | 92 +-------------- src/QsPasses/site-packages/TasksCI/cli.py | 106 ++++++++++++++++++ 5 files changed, 117 insertions(+), 108 deletions(-) delete mode 100644 src/QsPasses/develop.env create mode 100755 src/QsPasses/manage create mode 100644 src/QsPasses/site-packages/TasksCI/cli.py diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile index 0a281789aa..e039211bb9 100644 --- a/src/QsPasses/Makefile +++ b/src/QsPasses/Makefile @@ -1,18 +1,4 @@ -stylecheck: - @source develop.env && python -m TasksCI stylecheck - -fixstyle: - @source develop.env && python -m TasksCI --loglevel warning stylecheck --fix-issues - -lint: - @source develop.env && python -m TasksCI lint - -tests: - @source develop.env && python -m TasksCI test - - clean: rm -rf Release/ rm -rf Debug/ -runci: stylecheck lint tests clean diff --git a/src/QsPasses/develop.env b/src/QsPasses/develop.env deleted file mode 100644 index 002d66b4ee..0000000000 --- a/src/QsPasses/develop.env +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -export PYTHONPATH=$PYTHONPATH:$PWD/site-packages \ No newline at end of file diff --git a/src/QsPasses/manage b/src/QsPasses/manage new file mode 100755 index 0000000000..ada5de795d --- /dev/null +++ b/src/QsPasses/manage @@ -0,0 +1,10 @@ +#!/usr/bin/env python +import os +import sys + +ROOT = os.path.dirname(__file__) +sys.path.insert(0,os.path.join(ROOT, "site-packages")) + +from TasksCI.cli import cli + +cli() \ No newline at end of file diff --git a/src/QsPasses/site-packages/TasksCI/__main__.py b/src/QsPasses/site-packages/TasksCI/__main__.py index 69e83e40ef..425abf50fa 100644 --- a/src/QsPasses/site-packages/TasksCI/__main__.py +++ b/src/QsPasses/site-packages/TasksCI/__main__.py @@ -1,97 +1,7 @@ # Copyright (c) Microsoft 
Corporation. All rights reserved. # Licensed under the MIT License. - -from .formatting import main as style_check_main -from .builder import main as builder_main -from .linting import main as lint_main, clang_tidy_diagnose - -import click -import logging -import sys - -logger = logging.getLogger() - -# Logging configuration -ch = logging.StreamHandler() -ch.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -ch.setFormatter(formatter) -logger.addHandler(ch) - -# By default we only log errors -logger.setLevel(logging.ERROR) - - -@click.group() -@click.option('--loglevel', default="error") -def cli(loglevel): - levels = { - "critical": 50, - "error": 40, - "warning": 30, - "info": 20, - "debug": 10, - "notset": 0 - } - - loglevel = loglevel.lower() - if loglevel not in levels: - logger.critical("Invalid log level") - sys.exit(-1) - - logger.setLevel(levels[loglevel]) - logger.info("Loglevel set to {}".format(loglevel)) - - -@cli.command() -@click.option('--fix-issues', default=False, is_flag=True) -def stylecheck(fix_issues): - logger.info("Invoking style checker") - - style_check_main(fix_issues) - - -@cli.command() -@click.option("--diagnose", default=False, is_flag=True) -@click.option('--fix-issues', default=False, is_flag=True) -@click.option('--force', default=False, is_flag=True) -def lint(diagnose, fix_issues, force): - if diagnose: - clang_tidy_diagnose() - return - - if fix_issues: - if not force: - print("""Fixing isssues using Clang Tidy will break your code. -Make sure that you have committed your changes BEFORE DOING THIS. -Even so, this feature is experimental and there have been reports of -clang-tidy modying system libraries - therefore, USE THIS FEATURE AT -YOUR OWN RISK. 
- -Write 'I understand' to proceed.""") - print(":") - x = input() - if x.lower() != "i understand": - print("Wrong answer - stopping!") - exit(-1) - - logger.info("Invoking linter") - lint_main(fix_issues) - - -@cli.command() -@click.option('--debug/--no-debug', default=True) -@click.option('--generator', default=None) -def test(debug, generator): - logger.info("Building and testing") - - build_dir = "Debug" - if not debug: - build_dir = "Release" - - builder_main(build_dir, generator, True) - +from .cli import cli if __name__ == '__main__': cli() diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py new file mode 100644 index 0000000000..1a9a1f3f25 --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +from .formatting import main as style_check_main +from .builder import main as builder_main +from .linting import main as lint_main, clang_tidy_diagnose + +import click +import logging +import sys + +logger = logging.getLogger() + +# Logging configuration +ch = logging.StreamHandler() +ch.setLevel(logging.DEBUG) +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +logger.addHandler(ch) + +# By default we only log errors +logger.setLevel(logging.ERROR) + + +@click.group() +@click.option('--loglevel', default="error") +def cli(loglevel): + levels = { + "critical": 50, + "error": 40, + "warning": 30, + "info": 20, + "debug": 10, + "notset": 0 + } + + loglevel = loglevel.lower() + if loglevel not in levels: + logger.critical("Invalid log level") + sys.exit(-1) + + logger.setLevel(levels[loglevel]) + logger.info("Loglevel set to {}".format(loglevel)) + + +@cli.command() +@click.option('--fix-issues', default=False, is_flag=True) +def stylecheck(fix_issues): + logger.info("Invoking style checker") + + style_check_main(fix_issues) + + 
+@cli.command() +@click.option("--diagnose", default=False, is_flag=True) +@click.option('--fix-issues', default=False, is_flag=True) +@click.option('--force', default=False, is_flag=True) +def lint(diagnose, fix_issues, force): + if diagnose: + clang_tidy_diagnose() + return + + if fix_issues: + if not force: + print("""Fixing isssues using Clang Tidy will break your code. +Make sure that you have committed your changes BEFORE DOING THIS. +Even so, this feature is experimental and there have been reports of +clang-tidy modying system libraries - therefore, USE THIS FEATURE AT +YOUR OWN RISK. + +Write 'I understand' to proceed.""") + print(":") + x = input() + if x.lower() != "i understand": + print("Wrong answer - stopping!") + exit(-1) + + logger.info("Invoking linter") + lint_main(fix_issues) + + +@cli.command() +@click.option('--debug/--no-debug', default=True) +@click.option('--generator', default=None) +def test(debug, generator): + logger.info("Building and testing") + + build_dir = "Debug" + if not debug: + build_dir = "Release" + + builder_main(build_dir, generator, True) + + +@cli.command() +def runci(): + build_dir = "Debug" + + style_check_main(False) + lint_main(False) + builder_main(build_dir, None, True) + + +if __name__ == '__main__': + cli() From 9841c95acd3d273dc2f77ee7765d582ef0ac138c Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 14:46:52 +0200 Subject: [PATCH 011/106] Updating documentation --- src/QsPasses/README.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 0472337a67..1a7a15675c 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -69,30 +69,26 @@ These adds the necessary environment variables to ensure that you have the `Task To check the style, run ```sh -make stylecheck +./manage stylecheck ``` To test that the code compiles and tests passes run ```sh -make tests +./manage test ``` Finally, to analyse the code, run ```sh -make lint +./manage lint ``` You can run all processes by running: ```sh -make runci +./manage runci ``` As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended -that you use a docker image to perform these steps. - -# TODOs - -Look at https://github.com/llvm-mirror/clang-tools-extra/blob/master/clang-tidy/tool/run-clang-tidy.py +that you use a docker image to perform these steps. TODO(TFR): The docker image is not added yet and this will be documented in the future. From 1a5e95f8ecc78960a1886f785f0a2060ecc2d332 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 17:05:21 +0200 Subject: [PATCH 012/106] Refactoring pass --- .../classical-program.bc | Bin 0 -> 2416 bytes src/QsPasses/src/OpsCounter/OpsCounter.cpp | 68 ++++-------------- src/QsPasses/src/OpsCounter/OpsCounter.hpp | 30 ++++++++ 3 files changed, 43 insertions(+), 55 deletions(-) create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc new file mode 100644 index 0000000000000000000000000000000000000000..81f5c5224dc790063ec2c7aa487e9a61e44edde2 GIT binary patch literal 2416 zcmZ`*Z%kX)6~D%YYruYX0JUa$etiz<=CuudIGJl=vjH#7oDOM5%LgWE$*F!hChK2p7`)y( zn9y4~Y-+=gS}cP}r?hP9I@3lL^aIx$E*91Ab@1ZZB>yLcWiVlQ#{5jG=W6!CHGa7W zdf(s?(!yvtrf%jhzNTtFb}Iqhc49Hr(KeJ^XiX%B^vC@ww@ZcIJR(~(Xt-Iv42Pf% z8xVRM#-q;fJo*51J;IKCyRO4tr&awHJac_}D^Xy#@mmgp&i|m@rZ+->X^sAg-L1mFr(Mf|>r-PvMSW89jX-CX2Q*o;Fd za#g}IC`l2&8ArWJznMrG5={|b8oRYCil;uyF_juqm9_VBY}Lc<#I*M^#x=!LAm1~w z9pLmCu6W`j>qA0(*qet0T1=Sl$nWkQ=>zz5px_yc!50O)j;{LuCM`5UmnOR-`Jt>(l^jYE?CXz`~_9 zokxaJ3(epD;fhL}Z!;%+W(dZ65R+fS`bo9SE^o1u9g4oBXiF3p24zg+nV!?^A>DmF z^H63^Y6dxg>&J`iq|jK~7C%~2_7%mx1!Z3;ybzG^6*UILN)nDrN}2_pbe4`V2PtMt z(9HuQ#tnD{CS%^iO$u+=KaAVUV(%TL9m`A+7iY znW9c|Lwa!VQ6s1g$z)Vb<_v&65=qCNdr)N_Y%>4IF$aQjtO&STq)>r=-NUY$*yR{J z`KEg*v9~DprfdBN1=|hLmQ>nr6rP7mzn-^$sAVWGo z{s~}E_jebwlVY~}bz3}h0ABEBc>7R@xVt8HJ;tqX!mBZUq2MMB!?vt=`exi-avgu{ zh?|7MIGi3HdYIDD2yj26#b@7S+*mUHT)|O;O~$IH>HKSPTd82rihU4}uk(%_(Vm29 zVd0N0;3)}#Jx~nI*q4r7*w!-#IcC?zd?|D--5ua|ww|nP88^THXQw?I@4FK}nkd*7 z#NJZD@xfAWu>d|gO3piT@M_$64gxqrI{&i59JqD&Hv!qYT|u|Y>;CCtX7tRqz`*p& z&8EL@GCMBjksxlZnP~Z~Dr%N{&VKsmf(??DEj(WlPnX2gN%1szT^7HOB0NG*1EzeO zlNXbk&Ssy*$#=yTu3h`&Oh#|M=;*Nw{wig9+or#SUK&)O(74y*8}&~`CtT-5erO^% zae4f*XXX_?IN|e8&Cc*WJ>5Os0_sxdgn6XzQup`FBg09}8COX#U^j zj1~$D5c<+Rs7_j+WI-i`K9rO66JySFF#D{IxQR}G_9!a`9abnstKI;#| lg3v7hO8ow*anI!wl5h4EpJ%#zd=w6k!s$_ZoGSaT_HTfd7o-3H literal 0 
HcmV?d00001 diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.cpp b/src/QsPasses/src/OpsCounter/OpsCounter.cpp index b9bbc1469a..9400bbdb82 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.cpp @@ -7,65 +7,23 @@ using namespace llvm; -namespace -{ - -void Visitor(Function& f) -{ - errs() << "(operation-counter) " << f.getName() << "\n"; - errs() << "(operation-counter) number of arguments: " << f.arg_size() << "\n"; -} - -struct OpsCounterPass : PassInfoMixin -{ - static auto run(Function& f, FunctionAnalysisManager& /*unused*/) -> PreservedAnalyses // NOLINT - { - Visitor(f); - - return PreservedAnalyses::all(); - } -}; - -class CLegacyOpsCounterPass : public FunctionPass -{ - public: - static char ID; - CLegacyOpsCounterPass() - : FunctionPass(ID) - { - } - - auto runOnFunction(Function& f) -> bool override - { - Visitor(f); - return false; - } -}; -} // namespace - llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { - return {LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) { - pb.registerPipelineParsingCallback( - [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { - if (name == "operation-counter") - { - fpm.addPass(OpsCounterPass()); - return true; - } - return false; - }); - }}; + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "operation-counter") + { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); + }}; } extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return GetOpsCounterPluginInfo(); + return GetOpsCounterPluginInfo(); } - -char CLegacyOpsCounterPass::ID = 0; -static RegisterPass LegacyOpsCounterRegistration( - "legacy-operation-counter", - "Gate Counter Pass", - true, - false); diff --git 
a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/src/OpsCounter/OpsCounter.hpp index a26cc5e757..4095c6ad34 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.hpp @@ -4,4 +4,34 @@ #include "Llvm.hpp" +class COpsCounterPrinter : public llvm::PassInfoMixin +{ +public: + explicit COpsCounterPrinter(llvm::raw_ostream &out_stream) + : out_stream_(out_stream) + {} + + // llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + auto run(llvm::Function &f, llvm::FunctionAnalysisManager & /*unused*/) + -> llvm::PreservedAnalyses // NOLINT + { + out_stream_ << "(operation-counter) " << f.getName() << "\n"; + out_stream_ << "(operation-counter) number of arguments: " << f.arg_size() << "\n"; + + return llvm::PreservedAnalyses::all(); + } + /* + TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as + unused after compilation + */ + + static bool isRequired() + { + return true; + } + +private: + llvm::raw_ostream &out_stream_; +}; + auto GetOpsCounterPluginInfo() -> llvm::PassPluginLibraryInfo; From f8c7a9731ddb4dfb69f683cd622c950f5910f6b4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 17:37:42 +0200 Subject: [PATCH 013/106] Preparing analysis module --- .../classical-program.bc | Bin 2416 -> 2400 bytes .../examples/ClassicalIrCommandline/out.txt | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/out.txt diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc index 81f5c5224dc790063ec2c7aa487e9a61e44edde2..bb5a7848a56b2412806ec92ed5bbcb91408915ef 100644 GIT binary patch delta 360 zcmew$^gxLB-T`rL1|SdtVwZ`$(t^3ax5RS?Hux|C1r&K07?>tnE|-`I7!8-fYhJg;DXvLb*E&Wo{+NJXk2#P^`gTuEAdE(O$^VQB}}h$-!Q% zF*Rdq6FN)@3~eEY z9T*rCfVOe~u>%9ofeEJ+fR14S@)$v)4U7|RC*^b9u}u!}KrerJ~0VBf&>k9o2`#}!tv#AFRl4**)Hb4CCF delta 392 zcmaDL^g)RC-T`rL1|Sdt;*g2F(t@$SxB7DjHux|C1r&K07}zFSE)QP}%O|i`YP1(FU@tq-ZZ@I4dCLVJ-yyO~0J$%gh4AUmVEvV*;H zLA#j&dqqZb83%in1N#A)LyfY}7RoSe{PBg+<<&viXNods7RuZ@AoE}$?=3}{7mTtD z#T@KqGun$7I;wyq(2xrDVz41)73~EJ;3hQiwg@nYF)&PaXFj3BkigIua@c`^K>_Gm z4j^V=;5jhilmgHbEI>BMLkta!6K*I3m^eICFyg89Yg~`DA;Vngwb3QQ2ol2B> zk?3~i=3$E#X6p@y^=~*^opHA6aJJguY<0oej-gV4fkAchQ8rb*Yz8kUMuzg_jKre& z;>@bl08eKHpVYF{oaCIuymW=K)S}|d{5%CiLp?)11BT5C>|%_ZznLXI*f%gWu}rq- NxWWpSnJmHS0RYRyfqeh~ diff --git a/src/QsPasses/examples/ClassicalIrCommandline/out.txt b/src/QsPasses/examples/ClassicalIrCommandline/out.txt new file mode 100644 index 0000000000..e69de29bb2 From 87c08b9fa1fa74709c32c9fa182ca4ece5ae23b4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 10:37:01 +0200 Subject: [PATCH 014/106] Adding a style proposal --- src/QsPasses/CMakeLists.txt | 20 ++++- src/QsPasses/CONTRIBUTING.md | 77 +++++++++++++++++++ src/QsPasses/README.md | 64 ++++++++++++++- .../examples/ClassicalIrCommandline/Makefile | 2 +- .../examples/ClassicalIrCommandline/out.txt | 0 .../site-packages/TasksCI/__main__.py | 2 +- src/QsPasses/site-packages/TasksCI/builder.py | 2 +- src/QsPasses/site-packages/TasksCI/cli.py | 2 +- .../site-packages/TasksCI/formatting.py | 6 +- src/QsPasses/site-packages/TasksCI/linting.py | 2 +- .../site-packages/TasksCI/settings.py | 2 +- .../site-packages/TasksCI/toolchain.py | 2 +- src/QsPasses/src/Llvm.hpp | 2 +- src/QsPasses/src/OpsCounter/OpsCounter.cpp | 17 +++- src/QsPasses/src/OpsCounter/OpsCounter.hpp | 60 +++++++++++++-- 15 files changed, 239 insertions(+), 21 deletions(-) create mode 100644 src/QsPasses/CONTRIBUTING.md delete mode 100644 src/QsPasses/examples/ClassicalIrCommandline/out.txt diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 6cfd104282..976649b6ab 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -8,7 +8,7 @@ message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") # Setting the standard for -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") @@ -18,14 +18,30 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") set(CMAKE_EXPORT_COMPILE_COMMANDS ON) include_directories(${LLVM_INCLUDE_DIRS}) +link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/src) -# LLVM uses RTTI by default - added here for consistency + + +# Compiler flags 
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") + +# LLVM is normally built without RTTI. Be consistent with that. if(NOT LLVM_ENABLE_RTTI) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") endif() +# -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings +# are triggered if llvm-tutor is built without this flag (though otherwise it +# builds fine). For consistency, add it here too. +include(CheckCXXCompilerFlag) +check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) +if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") +endif() + + # The main libary add_library(QSharpPasses SHARED src/OpsCounter/OpsCounter.cpp) target_link_libraries(QSharpPasses diff --git a/src/QsPasses/CONTRIBUTING.md b/src/QsPasses/CONTRIBUTING.md new file mode 100644 index 0000000000..5cdd18327f --- /dev/null +++ b/src/QsPasses/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing (Proposal - WiP) + +This document is work in progress and nothing is set in stone. + +## Why do we need a style guide? + +Consistency and readibility such that it is easy to read and understand code that was not written by yourself. For example, if one developer uses `CamelCase` for namespaces and `snake_case` for classes while another uses `snake_case` for namespaces and `CamelCase` you may end up with code sections that looks like this + +```cpp +int32_t main() +{ + name_space1::Class1 hello; + NameSpace2::class_name world; +} +``` + +which is hard to read. + +## What does the style guide apply to? + +The style guide applies to any new code written as well as code that is being refactored added to the `QsPasses` library. We do not rewrite existing code for the sake just changing the style. + +## Style discrepency + +In case of a discrepency between this guideline and `clang-tidy` or `clang-format`, +clang tools rule. 
In case of discrency between this guide and any guides subsequently referenced guides, this guide rule. However, feel free to suggest changes. Changes will be incorporated on the basis +that updated styles are apply to new code and not existing code. + +## Naming + +Naming is taken from the [Microsoft AirSim](https://github.com/microsoft/AirSim/blob/master/docs/coding_guidelines.md) project. + +| **Code Element** | **Style** | **Comment** | +| --------------------- | -------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | +| Namespace | snake_case | Differentiates `namespace::ClassName` and `ClassName::SubClass` names | +| Class name | CamelCase | To differentiate from STL types which ISO recommends (do not use "C" or "T" prefixes) | +| Function name | camelCase | Lower case start is almost universal except for .Net world | +| Parameters/Locals | snake_case | Vast majority of standards recommends this because \_ is more readable to C++ crowd (although not much to Java/.Net crowd) | +| Member variables | snake_case_with\_ | The prefix \_ is heavily discouraged as ISO has rules around reserving \_identifiers, so we recommend suffix instead | +| Enums and its members | CamelCase | Most except very old standards agree with this one | +| Globals | g_snake_case | Avoid using globals whenever possible, but if you have to use `g_`. | +| Constants | UPPER_CASE | Very contentious and we just have to pick one here, unless if is a private constant in class or method, then use naming for Members or Locals | +| File names | Match case of class name in file | Lot of pro and cons either way but this removes inconsistency in auto generated code (important for ROS) | + +## Modernise when possible + +In general, modernise the code where possible. For instance, prefer `using` of `typedef`. + +## Header guards + +Prefer `#pragma once` over `#ifdef` protection. 
+ +## Code TODOs must contain owner name or Github issue + +```sh +% ./manage runci +(...) +QsPasses/src/OpsCounter/OpsCounter.cpp:39:21: error: missing username/bug in TODO [google-readability-todo,-warnings-as-errors] + // TODO: Fails to load if this is present + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // TODO(tfr): Fails to load if this is present +``` + +## Always add copyrights + +Always add copyrights at the top of the file. + +```text +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +``` + +For header files, prefer to put `#prama once` before the copyright. + +## Tabs vs. spaces + +Seriously, this should not even be a discussion: It does not matter. If you prefer one over the other feel free to write in whatever style you prefer as long as you use `clang-format` before making a PR. Again, the key here is consistency and readibility. diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 1a7a15675c..e242736dbb 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -2,11 +2,41 @@ This library defines LLVM passes used for optimising and transforming the IR. -## Getting started - The Q# pass library is a dynamic library that can be compiled and ran separately from the rest of the project code. +## What does LLVM passes do? + +Example 1: Optimisation + +``` +define double @test(double %x) { +entry: + %addtmp = fadd double 3.000000e+00, %x + %addtmp1 = fadd double %x, 3.000000e+00 + %multmp = fmul double %addtmp, %addtmp1 + ret double %multmp +} +``` + +``` +define double @test(double %x) { +entry: + %addtmp = fadd double 3.000000e+00, %x + ret double %addtmp +} +``` + +Example 2: Analytics + +Example 3: Validation + +## Out-of-source Pass + +This library is build as set of out-of-source-passes. All this means is that we will not be downloading the LLVM repository and modifying this repository directly. You can read more [here](https://llvm.org/docs/CMake.html#cmake-out-of-source-pass). 
+ +# Getting started + ## Dependencies This library is written in C++ and depends on: @@ -92,3 +122,33 @@ You can run all processes by running: As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended that you use a docker image to perform these steps. TODO(TFR): The docker image is not added yet and this will be documented in the future. + +# Developer FAQ + +## Pass does not load + +One error that you may encounter is that an analysis pass does not load with output similar to this: + +```sh +% opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc +Failed to load passes from '../../Debug/libQSharpPasses.dylib'. Request ignored. +opt: unknown pass name 'operation-counter' +``` + +This is likely becuase you have forgotten to instantiate static class members. For instance, in the case of an instance of `llvm::AnalysisInfoMixin` you are required to have static member `Key`: + +```cpp +class COpsCounterPass : public llvm::AnalysisInfoMixin { +private: + static llvm::AnalysisKey Key; //< REQUIRED by llvm registration + friend struct llvm::AnalysisInfoMixin; +}; +``` + +If you forget to instantiate this variable in your corresponding `.cpp` file, + +```cpp +// llvm::AnalysisKey COpsCounterPass::Key; //< Uncomment this line to make everything work +``` + +everything will compile, but the pass will fail to load. There will be no linking errors either. 
diff --git a/src/QsPasses/examples/ClassicalIrCommandline/Makefile b/src/QsPasses/examples/ClassicalIrCommandline/Makefile index b69052cb63..47609f40ac 100644 --- a/src/QsPasses/examples/ClassicalIrCommandline/Makefile +++ b/src/QsPasses/examples/ClassicalIrCommandline/Makefile @@ -6,7 +6,7 @@ emit-llvm-bc: debug-ng-pass-mac: emit-llvm-bc - opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib --passes="operation-counter" -disable-output classical-program.bc + opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -debug --passes="operation-counter" -disable-output classical-program.bc diff --git a/src/QsPasses/examples/ClassicalIrCommandline/out.txt b/src/QsPasses/examples/ClassicalIrCommandline/out.txt deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/QsPasses/site-packages/TasksCI/__main__.py b/src/QsPasses/site-packages/TasksCI/__main__.py index 425abf50fa..59d8961088 100644 --- a/src/QsPasses/site-packages/TasksCI/__main__.py +++ b/src/QsPasses/site-packages/TasksCI/__main__.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from .cli import cli diff --git a/src/QsPasses/site-packages/TasksCI/builder.py b/src/QsPasses/site-packages/TasksCI/builder.py index 329f1cbb32..73d8f7fb87 100644 --- a/src/QsPasses/site-packages/TasksCI/builder.py +++ b/src/QsPasses/site-packages/TasksCI/builder.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import os diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py index 1a9a1f3f25..62a4962ed6 100644 --- a/src/QsPasses/site-packages/TasksCI/cli.py +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/src/QsPasses/site-packages/TasksCI/formatting.py b/src/QsPasses/site-packages/TasksCI/formatting.py index 9cf87c609b..578580ba98 100644 --- a/src/QsPasses/site-packages/TasksCI/formatting.py +++ b/src/QsPasses/site-packages/TasksCI/formatting.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from os import path @@ -35,7 +35,7 @@ def require_todo_owner(filename, contents, cursor, fix_issues): def enforce_cpp_license(filename, contents, cursor, fix_issues): - return require_token("""// Copyright (c) Microsoft Corporation. All rights reserved. + return require_token("""// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. """, filename, contents, cursor, fix_issues) @@ -46,7 +46,7 @@ def enforce_py_license(filename, contents, cursor, fix_issues): if contents.strip() == "": return cursor, False - return require_token("""# Copyright (c) Microsoft Corporation. All rights reserved. + return require_token("""# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. """, filename, contents, cursor, fix_issues) diff --git a/src/QsPasses/site-packages/TasksCI/linting.py b/src/QsPasses/site-packages/TasksCI/linting.py index 6f12f49d3c..4a692ab302 100644 --- a/src/QsPasses/site-packages/TasksCI/linting.py +++ b/src/QsPasses/site-packages/TasksCI/linting.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import logging diff --git a/src/QsPasses/site-packages/TasksCI/settings.py b/src/QsPasses/site-packages/TasksCI/settings.py index ef4ba177cd..85d5b667f2 100644 --- a/src/QsPasses/site-packages/TasksCI/settings.py +++ b/src/QsPasses/site-packages/TasksCI/settings.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
from os import path diff --git a/src/QsPasses/site-packages/TasksCI/toolchain.py b/src/QsPasses/site-packages/TasksCI/toolchain.py index f666565860..5bda4c33f9 100644 --- a/src/QsPasses/site-packages/TasksCI/toolchain.py +++ b/src/QsPasses/site-packages/TasksCI/toolchain.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import shutil diff --git a/src/QsPasses/src/Llvm.hpp b/src/QsPasses/src/Llvm.hpp index cbff875717..f24aef3726 100644 --- a/src/QsPasses/src/Llvm.hpp +++ b/src/QsPasses/src/Llvm.hpp @@ -1,5 +1,5 @@ #pragma once -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #if defined(__GNUC__) diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.cpp b/src/QsPasses/src/OpsCounter/OpsCounter.cpp index 9400bbdb82..12922350ee 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.cpp @@ -1,16 +1,20 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
#include "OpsCounter/OpsCounter.hpp" #include "Llvm.hpp" +#include +#include using namespace llvm; +llvm::AnalysisKey COpsCounterPass::Key; llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { return { LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the printer pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { if (name == "operation-counter") @@ -20,6 +24,17 @@ llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() } return false; }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + // TODO: Fails to load if this is present + fam.registerPass([] { return COpsCounterPass(); }); + }); }}; } diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/src/OpsCounter/OpsCounter.hpp index 4095c6ad34..d4adf42781 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.hpp @@ -1,9 +1,43 @@ #pragma once -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
#include "Llvm.hpp" +class COpsCounterPass : public llvm::AnalysisInfoMixin +{ +public: + using Result = llvm::StringMap; + + Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) + { + COpsCounterPass::Result opcode_map; + + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) + { + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } + } + } + + return opcode_map; + } + +private: + static llvm::AnalysisKey Key; + friend struct llvm::AnalysisInfoMixin; +}; + class COpsCounterPrinter : public llvm::PassInfoMixin { public: @@ -11,15 +45,31 @@ class COpsCounterPrinter : public llvm::PassInfoMixin : out_stream_(out_stream) {} - // llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - auto run(llvm::Function &f, llvm::FunctionAnalysisManager & /*unused*/) + auto run(llvm::Function &function, llvm::FunctionAnalysisManager &fam) -> llvm::PreservedAnalyses // NOLINT { - out_stream_ << "(operation-counter) " << f.getName() << "\n"; - out_stream_ << "(operation-counter) number of arguments: " << f.arg_size() << "\n"; + auto &opcode_map = fam.getResult(function); + + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; + + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; + + for (auto const &instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), + instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; return llvm::PreservedAnalyses::all(); } + /* TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as unused after compilation 
From f09550873541649f65e69f5d4865bff9c1f85359 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 22 Jul 2021 10:41:15 +0200 Subject: [PATCH 015/106] Adding style proposal --- src/QsPasses/CONTRIBUTING.md | 8 +- .../site-packages/TasksCI/formatting.py | 4 +- src/QsPasses/src/OpsCounter/OpsCounter.cpp | 46 +++---- src/QsPasses/src/OpsCounter/OpsCounter.hpp | 117 +++++++++--------- 4 files changed, 90 insertions(+), 85 deletions(-) diff --git a/src/QsPasses/CONTRIBUTING.md b/src/QsPasses/CONTRIBUTING.md index 5cdd18327f..0b4493bb8d 100644 --- a/src/QsPasses/CONTRIBUTING.md +++ b/src/QsPasses/CONTRIBUTING.md @@ -1,6 +1,12 @@ # Contributing (Proposal - WiP) -This document is work in progress and nothing is set in stone. +This document is work in progress and nothing is set in stone. In case you do not want to feel like reading this style guide, just run + +```sh +./manage runci +``` + +from the `QsPasses` directory as all points defined in this document is automatically enforces. You can then refer to this guide for an explanation for why and how. ## Why do we need a style guide? diff --git a/src/QsPasses/site-packages/TasksCI/formatting.py b/src/QsPasses/site-packages/TasksCI/formatting.py index 578580ba98..e03801bc7e 100644 --- a/src/QsPasses/site-packages/TasksCI/formatting.py +++ b/src/QsPasses/site-packages/TasksCI/formatting.py @@ -35,7 +35,7 @@ def require_todo_owner(filename, contents, cursor, fix_issues): def enforce_cpp_license(filename, contents, cursor, fix_issues): - return require_token("""// Copyright (c) Microsoft Corporation. + return require_token("""// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. """, filename, contents, cursor, fix_issues) @@ -46,7 +46,7 @@ def enforce_py_license(filename, contents, cursor, fix_issues): if contents.strip() == "": return cursor, False - return require_token("""# Copyright (c) Microsoft Corporation. + return require_token("""# Copyright (c) Microsoft Corporation. 
# Licensed under the MIT License. """, filename, contents, cursor, fix_issues) diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.cpp b/src/QsPasses/src/OpsCounter/OpsCounter.cpp index 12922350ee..ac1b798e4b 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.cpp @@ -12,33 +12,33 @@ llvm::AnalysisKey COpsCounterPass::Key; llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { - return { - LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the printer - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "operation-counter") - { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - return true; - } - return false; - }); + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, + [](PassBuilder& pb) + { + // Registering the printer + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) + { + if (name == "operation-counter") + { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - }); + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) + { fpm.addPass(COpsCounterPrinter(llvm::errs())); }); - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - // TODO: Fails to load if this is present - fam.registerPass([] { return COpsCounterPass(); }); - }); - }}; + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) + { fam.registerPass([] { return COpsCounterPass(); }); }); + }}; } extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo 
llvmGetPassPluginInfo() { - return GetOpsCounterPluginInfo(); + return GetOpsCounterPluginInfo(); } diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/src/OpsCounter/OpsCounter.hpp index d4adf42781..44320aaff1 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.hpp @@ -6,82 +6,81 @@ class COpsCounterPass : public llvm::AnalysisInfoMixin { -public: - using Result = llvm::StringMap; + public: + using Result = llvm::StringMap; - Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) - { - COpsCounterPass::Result opcode_map; - - for (auto &basic_block : function) + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/) { - for (auto &instruction : basic_block) - { - auto name = instruction.getOpcodeName(); + COpsCounterPass::Result opcode_map; - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else + for (auto& basic_block : function) { - opcode_map[instruction.getOpcodeName()]++; + for (auto& instruction : basic_block) + { + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } + } } - } - } - return opcode_map; - } + return opcode_map; + } -private: - static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; + private: + static llvm::AnalysisKey Key; + friend struct llvm::AnalysisInfoMixin; }; class COpsCounterPrinter : public llvm::PassInfoMixin { -public: - explicit COpsCounterPrinter(llvm::raw_ostream &out_stream) - : out_stream_(out_stream) - {} - - auto run(llvm::Function &function, llvm::FunctionAnalysisManager &fam) - -> llvm::PreservedAnalyses // NOLINT - { - auto &opcode_map = fam.getResult(function); - - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; - - 
constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; - - for (auto const &instruction : opcode_map) + public: + explicit COpsCounterPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), - instruction.second); } - out_stream_ << "---------------------------" - << "\n\n"; - return llvm::PreservedAnalyses::all(); - } + auto run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) -> llvm::PreservedAnalyses // NOLINT + { + auto& opcode_map = fam.getResult(function); + + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; - /* - TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as - unused after compilation - */ + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; - static bool isRequired() - { - return true; - } + for (auto const& instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; + + return llvm::PreservedAnalyses::all(); + } + + /* + TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as + unused after compilation + */ + + static bool isRequired() + { + return true; + } -private: - llvm::raw_ostream &out_stream_; + private: + llvm::raw_ostream& out_stream_; }; auto GetOpsCounterPluginInfo() -> llvm::PassPluginLibraryInfo; From b0c63d6a7329d9b2cb63686a3dc96a0e45c3673a Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 11:47:00 +0200 Subject: [PATCH 016/106] Updating documentation --- src/QsPasses/README.md | 140 ++++++++++++++++-- .../examples/ClassicalIrCommandline/Makefile | 4 +- .../classical-program.bc | Bin 2400 -> 0 bytes src/QsPasses/src/OpsCounter/OpsCounter.hpp | 5 +- 4 files changed, 133 insertions(+), 16 deletions(-) delete mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index e242736dbb..134ef75c0a 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -1,13 +1,22 @@ # Q# Passes for LLVM -This library defines LLVM passes used for optimising and transforming the IR. - -The Q# pass library is a dynamic library that can be compiled and ran separately from the -rest of the project code. +This library defines LLVM passes used for analysing, optimising and transforming the IR. The Q# pass library is a dynamic library that can be compiled and ran separately from the +rest of the project code. While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the QIR standard. ## What does LLVM passes do? -Example 1: Optimisation +Before getting started, we here provide a few examples of classical use cases for LLVM passes. + +**Example 1: Transformation**. As a first example of what LLVM passes can do, we look at optimisation. 
Consider a compiler which +compiles + +```c +double test(double x) { + return (1+2+x)*(x+(1+2)); +} +``` + +into following IR: ``` define double @test(double %x) { @@ -19,17 +28,122 @@ entry: } ``` +This code is obviously inefficient as we could get rid of one operation by rewritting the code to: + +```c +double test(double x) { + double y = 3+x; + return y * y; +} +``` + +One purpose of LLVM passes is to allow automatic transformation from the above IR to the IR: + ``` define double @test(double %x) { entry: - %addtmp = fadd double 3.000000e+00, %x - ret double %addtmp + %addtmp = fadd double %x, 3.000000e+00 + %multmp = fmul double %addtmp, %addtmp + ret double %multmp +} +``` + +**Example 2: Analytics**. Another example of useful passes are those generating and collecting statistics about the program. For instance, one analytics program +makes sense for classical programs is to count instructions used to implement functions. Take the C program: + +```c +int foo(int x) +{ + return x; +} + +void bar(int x, int y) +{ + foo(x + y); +} + +int main() +{ + foo(2); + bar(3, 2); + + return 0; +} +``` + +which produces follow IR (without optimisation): + +```language +define dso_local i32 @foo(i32 %0) #0 { + %2 = alloca i32, align 4 + store i32 %0, i32* %2, align 4 + %3 = load i32, i32* %2, align 4 + ret i32 %3 +} + +define dso_local void @bar(i32 %0, i32 %1) #0 { + %3 = alloca i32, align 4 + %4 = alloca i32, align 4 + store i32 %0, i32* %3, align 4 + store i32 %1, i32* %4, align 4 + %5 = load i32, i32* %3, align 4 + %6 = load i32, i32* %4, align 4 + %7 = add nsw i32 %5, %6 + %8 = call i32 @foo(i32 %7) + ret void } + +define dso_local i32 @main() #0 { + %1 = alloca i32, align 4 + store i32 0, i32* %1, align 4 + %2 = call i32 @foo(i32 2) + call void @bar(i32 3, i32 2) + ret i32 0 +} +``` + +A stat pass for this code, would collect following statisics: + +```text +Stats for 'foo' +=========================== +Opcode # Used +--------------------------- +load 1 +ret 1 +alloca 
1 +store 1 +--------------------------- + +Stats for 'bar' +=========================== +Opcode # Used +--------------------------- +load 2 +add 1 +ret 1 +alloca 2 +store 2 +call 1 +--------------------------- + +Stats for 'main' +=========================== +Opcode # Used +--------------------------- +ret 1 +alloca 1 +store 1 +call 2 +--------------------------- ``` -Example 2: Analytics +**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on static arrays [2]. +Note that this is a non-standard usecase as such analysis is usually made using the AST rather than at the IR level. -Example 3: Validation +**References** +[1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass +[2] https://github.com/victor-fdez/llvm-array-check-pass ## Out-of-source Pass @@ -41,13 +155,13 @@ This library is build as set of out-of-source-passes. All this means is that we This library is written in C++ and depends on: -- LLVM +- LLVM Additional development dependencies include: -- CMake -- clang-format -- clang-tidy +- CMake +- clang-format +- clang-tidy ## Building the passes diff --git a/src/QsPasses/examples/ClassicalIrCommandline/Makefile b/src/QsPasses/examples/ClassicalIrCommandline/Makefile index 47609f40ac..f50340c5a5 100644 --- a/src/QsPasses/examples/ClassicalIrCommandline/Makefile +++ b/src/QsPasses/examples/ClassicalIrCommandline/Makefile @@ -1,8 +1,8 @@ emit-llvm: - clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll + clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll emit-llvm-bc: - clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc + clang -O0 -c -emit-llvm classical-program.c -o classical-program.bc debug-ng-pass-mac: emit-llvm-bc diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc deleted file mode 100644 index 
bb5a7848a56b2412806ec92ed5bbcb91408915ef..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2400 zcmaJ@Z%kX)6~Fc~Tm$CW0o0h~y_few)_HA{*-qz~*=T@i>vViDT0Sr-6E*~D)&P%Z zivJ|qp0V3YH#9jB(mX9y?JZJM?F&`Wln>ElY(_m~RTNE;#SntkzaipspH(FhZJ6Pj2IzAgBUazrlL?)$jjqXyjk> zeRVx*w+3XA0U@mxp*pREU_j3R;dslnj-u{-waMOkU6V9?wMySC=ziL-nqJa&Ua#v+ zn(R$Zz4^Pf_ReXqyePGFHINxo$93(cqW*&>(ev`O_y>)>GiiR#@?6Sxb?)Ldaj^(` z-xd+7htYmYUn5?6Q`dOvW)iyXWH!~*&^PKR`UpAP-xm})nmo*MNs9D>$k zM(7tXp1Aw$qyIqDs7$-o($3`3J~PFw284VESI%;4s*soXa<+a+;>vM;Ph%yrpF}NK zwc@CrOj?LTC1EcPXNZ!*BHe?g_~n4Gm#yFI5S9Z})pEAJq!HF6uK4tLkW9$L;U|id zDs;>d#f)PS91SL}c(Ibf6KE5O>s$Pq$geMOpp9zC@cFo~@>GdT!a?SZTVzrtjtr4a z81O_<382EIn#%DUpI_kr&+$amOF}YHMB-Qh6G%8s99A3&CMNYb){b*r9<0uCD9>+X z>-Rf^yhhlR`1LLB378Cy-XaQ`jsgHPc_YX~KnZx`u{q56EgZ5^&5ksgfGf};689x; zXNy~o3u}IEGb^Bo1&34Qs*F`ok|H4sjs=;Jg~%BaOA#fF{q;+lllO9LxxyT(^>=gJ zVL;f4*WbU6>>GxS&X*Y2)V-_PK`u2dh2IE--2 zN>d{w;t^z2WISHssqkonh+}>{k-?MgcqGL_Pp2!@dn%SQcwEGhmFfp7jzO9n9jvT= zXu^t$zan+B@7Y}Lci;OcFj;r@$M5cL{q}YDP3>C8Di(0Y2jG*R!}>YB z$}MhjLrt2tX^%5mnCVn;G=f9val;-m-Z!xiRrZ{rQ-Cb~ILi%Lt18^ajSjZazP^8sPV`nKy% z!nNXQooC#467D49DjhaW`tg+@{#KAE(5o~VWj}-mPd=&wwPBU?>B+bmQj$c|@fRK( zvJW=dzvtKkE7Mw#1TK4Fq4T4Y5w?okebH7`PiKlyWMWw64?W=LbO0hfchk? 
zO5A!}Sl@)l#Q#Mj44HeJbDnSCNVrPA(~k}DlW+t_(mmY|Q%33m;fJ&)?MwW|0{T_5|I zwPj(oL)h6mUf$w20D!mIHJ50cPn<{=oHL%*Qo((Dp|w~58{H-E{J0(is&Q~&FKPbM z5_{k`-rt1GHtt%DyQ1+QK6cc^Zd+NHezsZt=S_CU$3C*Q+&c)+B2?ZwejxFC62A!b zSmO7klSAS=t~;K#+f3_hqP5g}2ci$g#sH_6gk|_xMMZ?Z205k(UePmcXs%K0Ua=)U zbM4wET^W<*lG|qQ{8>u+wbL|#UR~9o@IWx2^o53FgTC_~v3qcG@UnMoWUyZu42uJo z1Ea5rlY>fVcx+U(+0Rl8Az=-X{m$b8PwSG_ysc{^IiV4{oA>e5mt6up=!8xyS z-AuI2GH}NyCWPpl#3lF5gzE+*A=8#gxFI1^1^Rje;^R7{TcF!2b1HAt#kB$P=$qfD z>v#dwR9?HT1-_rakHZ?R0Cv`NDvAEdnCHHuYX41dL}(DI!}BmUFtdKDi`KzfAbLSJ z5S8@;(MCslDg#6ty@&ed>p+b_M;f25gXjyH8+eL=HoA{~)6wap%0=Z=_GNh&%;|p^ zii55{*$*uG0br*8TLAO>bUrjT)*nzt0>h(6agR71eU2B;1Z4U0;F*!Yz*s0^vq3^w iAxT1^;eo*AGqN)Fni3c}JJ1J*``~mRJx-N Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/) { COpsCounterPass::Result opcode_map; - for (auto& basic_block : function) { for (auto& instruction : basic_block) { + if (instruction.isDebugOrPseudoInst()) + { + continue; + } auto name = instruction.getOpcodeName(); if (opcode_map.find(name) == opcode_map.end()) From 564f5189ee899ccb5050c836cc0412c5fdd7e1a5 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 13:47:56 +0200 Subject: [PATCH 017/106] Template based pass generator --- src/QsPasses/CMakeLists.txt | 8 +-- src/QsPasses/{src => include}/Llvm.hpp | 0 src/QsPasses/libs/CMakeLists.txt | 43 +++++++++++++++ .../{src => libs}/OpsCounter/OpsCounter.cpp | 0 .../{src => libs}/OpsCounter/OpsCounter.hpp | 0 src/QsPasses/site-packages/TasksCI/cli.py | 55 ++++++++++++++++++- .../site-packages/TasksCI/formatting.py | 6 +- .../TasksCI/templates/basic/SPECIFICATION.md | 1 + .../TasksCI/templates/basic/{name}.cpp.tpl | 41 ++++++++++++++ .../TasksCI/templates/basic/{name}.hpp.tpl | 13 +++++ 10 files changed, 157 insertions(+), 10 deletions(-) rename src/QsPasses/{src => include}/Llvm.hpp (100%) create mode 100644 src/QsPasses/libs/CMakeLists.txt rename src/QsPasses/{src => libs}/OpsCounter/OpsCounter.cpp (100%) rename src/QsPasses/{src => libs}/OpsCounter/OpsCounter.hpp (100%) create mode 100644 src/QsPasses/site-packages/TasksCI/templates/basic/SPECIFICATION.md create mode 100644 src/QsPasses/site-packages/TasksCI/templates/basic/{name}.cpp.tpl create mode 100644 src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 976649b6ab..40c9491c1f 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -22,8 +22,6 @@ link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/src) - - # Compiler flags set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") @@ -42,7 +40,5 @@ if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") endif() -# The main libary -add_library(QSharpPasses SHARED src/OpsCounter/OpsCounter.cpp) -target_link_libraries(QSharpPasses - "$<$:-undefined dynamic_lookup>") +add_subdirectory(libs) + diff --git a/src/QsPasses/src/Llvm.hpp b/src/QsPasses/include/Llvm.hpp similarity index 100% rename from src/QsPasses/src/Llvm.hpp rename to 
src/QsPasses/include/Llvm.hpp diff --git a/src/QsPasses/libs/CMakeLists.txt b/src/QsPasses/libs/CMakeLists.txt new file mode 100644 index 0000000000..578a55e711 --- /dev/null +++ b/src/QsPasses/libs/CMakeLists.txt @@ -0,0 +1,43 @@ + +macro(list_qs_passes result) + file(GLOB children RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/*) + set(dirlist "") + foreach(child ${children}) + if(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${child}) + list(APPEND dirlist ${child}) + endif() + endforeach() + set(${result} ${dirlist}) +endmacro() + +list_qs_passes(QS_PASSES) + +foreach(pass_plugin ${QS_PASSES}) + + # Getting sources + file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/${pass_plugin}/*.cpp) + + # Adding library + add_library(${pass_plugin} + SHARED + ${sources}) + + # Adding include directories + target_include_directories( + ${pass_plugin} + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}" + ) + + target_include_directories( + ${pass_plugin} + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/../include" + ) + + + # Linking + target_link_libraries(${pass_plugin} + "$<$:-undefined dynamic_lookup>") + +endforeach() diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.cpp b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp similarity index 100% rename from src/QsPasses/src/OpsCounter/OpsCounter.cpp rename to src/QsPasses/libs/OpsCounter/OpsCounter.cpp diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp similarity index 100% rename from src/QsPasses/src/OpsCounter/OpsCounter.hpp rename to src/QsPasses/libs/OpsCounter/OpsCounter.hpp diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py index 62a4962ed6..09f9e9c4a1 100644 --- a/src/QsPasses/site-packages/TasksCI/cli.py +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -9,7 +9,11 @@ import click import logging import sys +import os +LIB_DIR = os.path.abspath(os.path.dirname((__file__))) +TEMPLATE_DIR = 
os.path.join(LIB_DIR, "templates") +SOURCE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(LIB_DIR))) logger = logging.getLogger() # Logging configuration @@ -63,7 +67,7 @@ def lint(diagnose, fix_issues, force): if fix_issues: if not force: - print("""Fixing isssues using Clang Tidy will break your code. + print("""Fixing isssues using Clang Tidy will break your code. Make sure that you have committed your changes BEFORE DOING THIS. Even so, this feature is experimental and there have been reports of clang-tidy modying system libraries - therefore, USE THIS FEATURE AT @@ -102,5 +106,54 @@ def runci(): builder_main(build_dir, None, True) +@cli.command() +@click.argument( + "name" +) +@click.option( + "--template", + default="basic", +) +def create_pass(name, template): + + target_dir = os.path.join(SOURCE_DIR, "libs", name) + if os.path.exists(target_dir): + logger.error("Pass '{}' already exists".format(name)) + exit(-1) + + if template is None: + raise BaseException("Choice is not implemented yet") + + template_dir = os.path.join(TEMPLATE_DIR, template) + if not os.path.exists(template_dir): + logger.error("Template not found") + exit(-1) + + logger.info(" ".join(["Creating", name, "in", target_dir])) + os.makedirs(target_dir) + + for root, dirs, files in os.walk(template_dir): + + # Creating dirs + for d in dirs: + os.makedirs(os.path.join(target_dir, d)) + + # Generating files + for f in files: + src = os.path.join(root, f) + dest = os.path.join(target_dir, f).format(name=name) + + with open(src, "r") as fb: + contents = fb.read() + + contents = contents.replace("{{name}}", name) + contents = contents.replace("{{name}}", name) + + # with open(dest, "w") as fb: + # fb.write(contents) + + logger.info("- Wrote {}".format(dest)) + + if __name__ == '__main__': cli() diff --git a/src/QsPasses/site-packages/TasksCI/formatting.py b/src/QsPasses/site-packages/TasksCI/formatting.py index e03801bc7e..ebe14283cc 100644 --- 
a/src/QsPasses/site-packages/TasksCI/formatting.py +++ b/src/QsPasses/site-packages/TasksCI/formatting.py @@ -83,10 +83,10 @@ def enforce_formatting(filename, contents, cursor, fix_issues): # Source pipeline definitions -AUTO_FORMAT_LANGUAGES = [ +SOURCE_PIPELINES = [ { "name": "C++ Main", - "src": path.join(PROJECT_ROOT, "src"), + "src": path.join(PROJECT_ROOT, "libs"), "pipelines": { "hpp": [ @@ -131,7 +131,7 @@ def execute_pipeline(pipeline, filename: str, fix_issues: bool): def main(fix_issues: bool = False): failed = False - for language in AUTO_FORMAT_LANGUAGES: + for language in SOURCE_PIPELINES: logger.info("Formatting {}".format(language["name"])) basedir = language["src"] pipelines = language["pipelines"] diff --git a/src/QsPasses/site-packages/TasksCI/templates/basic/SPECIFICATION.md b/src/QsPasses/site-packages/TasksCI/templates/basic/SPECIFICATION.md new file mode 100644 index 0000000000..f051462f55 --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/templates/basic/SPECIFICATION.md @@ -0,0 +1 @@ +# {{name}} Specification diff --git a/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.cpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.cpp.tpl new file mode 100644 index 0000000000..c5fcb33feb --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.cpp.tpl @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "{name}/{name}.hpp" + +#include "Llvm.hpp" + +#include +#include + +llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &/*function*/, llvm::FunctionAnalysisManager &/*fam*/) +{ + // Implement the pass details here + return llvm::PreservedAnalyses::all(); +} + + +// Registering the plugin +llvm::PassPluginLibraryInfo Get{name}PluginInfo() +{ + using namespace llvm; + return { + LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "{operation_name}") + { + fpm.addPass(C{name}Pass(llvm::errs())); + return true; + } + + return false; + }); + }}; +} + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return Get{name}PluginInfo(); +} diff --git a/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl new file mode 100644 index 0000000000..dfd33eeb1d --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl @@ -0,0 +1,13 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm.hpp" + +class C{name}Pass : public llvm::PassInfoMixin +{ +public: + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); +}; + +auto Get{name}PluginInfo() -> llvm::PassPluginLibraryInfo; From c80fd35725397256545182c618c0a6933c35e8fb Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 14:16:24 +0200 Subject: [PATCH 018/106] Updating template and writing more documentation --- src/QsPasses/README.md | 46 ++++++++++++++++-- src/QsPasses/site-packages/TasksCI/cli.py | 47 ++++++++++++++++--- .../{basic => FunctionPass}/SPECIFICATION.md | 0 .../{basic => FunctionPass}/{name}.cpp.tpl | 7 ++- .../{basic => FunctionPass}/{name}.hpp.tpl | 0 5 files changed, 87 insertions(+), 13 deletions(-) rename src/QsPasses/site-packages/TasksCI/templates/{basic => FunctionPass}/SPECIFICATION.md (100%) rename src/QsPasses/site-packages/TasksCI/templates/{basic => FunctionPass}/{name}.cpp.tpl (90%) rename src/QsPasses/site-packages/TasksCI/templates/{basic => FunctionPass}/{name}.hpp.tpl (100%) diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 134ef75c0a..07edfded05 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -155,13 +155,13 @@ This library is build as set of out-of-source-passes. All this means is that we This library is written in C++ and depends on: -- LLVM +- LLVM Additional development dependencies include: -- CMake -- clang-format -- clang-tidy +- CMake +- clang-format +- clang-tidy ## Building the passes @@ -194,6 +194,44 @@ opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes= For a gentle introduction, see examples. +## Creating a new pass + +To make it easy to create a new pass, we have created a few templates to get you started quickly: + +```sh +% ./manage create-pass HelloWorld +Available templates: + +1. Function Pass + +Select a template:1 +``` + +At the moment you only have one choice which is a function pass. Over time we will add additional templates. Once you have instantiated your template, you are ready to build it: + +```sh +% mkdir Debug +% cd Debug +% cmake .. +-- The C compiler identification is AppleClang 12.0.5.12050022 +-- The CXX compiler identification is AppleClang 12.0.5.12050022 +(...) 
+-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug + +% make + +[ 25%] Building CXX object libs/CMakeFiles/OpsCounter.dir/OpsCounter/OpsCounter.cpp.o +[ 50%] Linking CXX shared library libOpsCounter.dylib +[ 50%] Built target OpsCounter +[ 75%] Building CXX object libs/CMakeFiles/HelloWorld.dir/HelloWorld/HelloWorld.cpp.o +[100%] Linking CXX shared library libHelloWorld.dylib +[100%] Built target HelloWorld +``` + +Your new pass is ready to be implemented. Open `libs/HelloWorld/HelloWorld.cpp` to implement the details of the pass. At the moment, the template will not do much except for print the function names of your code. + ## CI Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py index 09f9e9c4a1..adeab1dea0 100644 --- a/src/QsPasses/site-packages/TasksCI/cli.py +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -10,6 +10,7 @@ import logging import sys import os +import re LIB_DIR = os.path.abspath(os.path.dirname((__file__))) TEMPLATE_DIR = os.path.join(LIB_DIR, "templates") @@ -112,7 +113,7 @@ def runci(): ) @click.option( "--template", - default="basic", + default=None, ) def create_pass(name, template): @@ -122,13 +123,37 @@ def create_pass(name, template): exit(-1) if template is None: - raise BaseException("Choice is not implemented yet") + # Listing options + options = [] + print("Available templates:") + print("") + for template_name in os.listdir(TEMPLATE_DIR): + if os.path.isdir(os.path.join(TEMPLATE_DIR, template_name)): + options.append(template_name) + + # Printing option + pretty_template_name = re.sub(r'(? 
len(options) + 1: + try: + n = input("Select a template:") + n = int(n) + except: # noqa: E722 + logger.error("Invalid choice") + exit(-1) + + # Getting the template + template = options[n - 1] template_dir = os.path.join(TEMPLATE_DIR, template) if not os.path.exists(template_dir): - logger.error("Template not found") + logger.error("Template does not exist") exit(-1) + operation_name = re.sub(r'(? /*unused*/) { if (name == "{operation_name}") { - fpm.addPass(C{name}Pass(llvm::errs())); + fpm.addPass(C{name}Pass()); return true; } diff --git a/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl similarity index 100% rename from src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl rename to src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl From 3078388baf241198c362c370fb9860f1a3fa527e Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 22 Jul 2021 14:36:34 +0200 Subject: [PATCH 019/106] Adding introduction on how to create a pass --- src/QsPasses/README.md | 18 +++++++++++++++++- .../templates/FunctionPass/{name}.cpp.tpl | 14 +++++++++++--- .../templates/FunctionPass/{name}.hpp.tpl | 19 ++++++++++++++++++- 3 files changed, 46 insertions(+), 5 deletions(-) diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 07edfded05..ade1b0c9f4 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -230,7 +230,23 @@ At the moment you only have one choice which is a function pass. Over time we wi [100%] Built target HelloWorld ``` -Your new pass is ready to be implemented. Open `libs/HelloWorld/HelloWorld.cpp` to implement the details of the pass. At the moment, the template will not do much except for print the function names of your code. +Your new pass is ready to be implemented. Open `libs/HelloWorld/HelloWorld.cpp` to implement the details of the pass. 
At the moment, the +template will not do much except for print the function names of your code. To test your new pass go to the directory `examples/ClassicalIrCommandline`, +build an IR and run the pass: + +```sh +% cd ../examples/ClassicalIrCommandline +% make +% opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll +``` + +If everything worked, you should see output like this: + +```sh +Implement your pass here: foo +Implement your pass here: bar +Implement your pass here: main +``` ## CI diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl index 61084a1482..feab1944fd 100644 --- a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl +++ b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl @@ -8,17 +8,22 @@ #include #include -llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &/*function*/, llvm::FunctionAnalysisManager &/*fam*/) +llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &function, llvm::FunctionAnalysisManager &/*fam*/) { // Pass body - llvm::errs() << "Implement your pass here\n"; + llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; return llvm::PreservedAnalyses::all(); } +bool C{name}Pass::isRequired() +{ + return true; +} -// Registering the plugin +// Helper functions which we do not expose externally +namespace { llvm::PassPluginLibraryInfo Get{name}PluginInfo() { using namespace llvm; @@ -37,7 +42,10 @@ llvm::PassPluginLibraryInfo Get{name}PluginInfo() }); }}; } +} + +// Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { return Get{name}PluginInfo(); diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl index 
dfd33eeb1d..39e9026be4 100644 --- a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl +++ b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl @@ -7,7 +7,24 @@ class C{name}Pass : public llvm::PassInfoMixin { public: + /// Constructors and destructors + /// @{ + C{name}Pass() = default; + C{name}Pass(C{name}Pass const &) = default; + C{name}Pass(C{name}Pass &&) = default; + ~C{name}Pass() = default; + /// @} + + /// Operators + /// @{ + C{name}Pass &operator=(C{name}Pass const &) = default; + C{name}Pass &operator=(C{name}Pass &&) = default; + /// @} + + /// Functions required by LLVM + /// @{ llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} }; -auto Get{name}PluginInfo() -> llvm::PassPluginLibraryInfo; From e975b25779e401dcd5807aa524ca2ec0923f6172 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 22 Jul 2021 15:03:10 +0200 Subject: [PATCH 020/106] Improving code quality --- src/QsPasses/libs/OpsCounter/OpsCounter.cpp | 73 ++++++++++++- src/QsPasses/libs/OpsCounter/OpsCounter.hpp | 110 ++++++++------------ 2 files changed, 111 insertions(+), 72 deletions(-) diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp index ac1b798e4b..293e584b4d 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp @@ -8,8 +8,74 @@ #include #include using namespace llvm; -llvm::AnalysisKey COpsCounterPass::Key; +COpsCounterAnalytics::Result COpsCounterAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) +{ + COpsCounterAnalytics::Result opcode_map; + for (auto& basic_block : function) + { + for (auto& instruction : basic_block) + { + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + 
opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } + } + } + + return opcode_map; +} + +COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) +{ +} + +llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) +{ + auto& opcode_map = fam.getResult(function); + + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; + + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; + + for (auto const& instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; + + return llvm::PreservedAnalyses::all(); +} + +bool COpsCounterPrinter::isRequired() +{ + return true; +} + +llvm::AnalysisKey COpsCounterAnalytics::Key; + +// Interface to plugin +namespace +{ llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { return { @@ -20,7 +86,7 @@ llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() pb.registerPipelineParsingCallback( [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { - if (name == "operation-counter") + if (name == "print") { fpm.addPass(COpsCounterPrinter(llvm::errs())); return true; @@ -34,9 +100,10 @@ llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() // Registering the analysis module pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) - { fam.registerPass([] { return COpsCounterPass(); }); }); + { fam.registerPass([] { return COpsCounterAnalytics(); }); }); }}; } +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp 
b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp index a5fb2864b8..b92c430ce5 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp @@ -4,86 +4,58 @@ #include "Llvm.hpp" -class COpsCounterPass : public llvm::AnalysisInfoMixin +class COpsCounterAnalytics : public llvm::AnalysisInfoMixin { public: using Result = llvm::StringMap; - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/) - { - COpsCounterPass::Result opcode_map; - for (auto& basic_block : function) - { - for (auto& instruction : basic_block) - { - if (instruction.isDebugOrPseudoInst()) - { - continue; - } - auto name = instruction.getOpcodeName(); - - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; - } - } - } - - return opcode_map; - } - + /// Constructors and destructors + /// @{ + COpsCounterAnalytics() = default; + COpsCounterAnalytics(COpsCounterAnalytics const&) = delete; + COpsCounterAnalytics(COpsCounterAnalytics&&) = default; + ~COpsCounterAnalytics() = default; + /// @} + + /// Operators + /// @{ + COpsCounterAnalytics& operator=(COpsCounterAnalytics const&) = delete; + COpsCounterAnalytics& operator=(COpsCounterAnalytics&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} private: static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; + friend struct llvm::AnalysisInfoMixin; }; class COpsCounterPrinter : public llvm::PassInfoMixin { public: - explicit COpsCounterPrinter(llvm::raw_ostream& out_stream) - : out_stream_(out_stream) - { - } - - auto run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) -> llvm::PreservedAnalyses // NOLINT - { - auto& opcode_map = fam.getResult(function); - - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << 
"===========================\n"; - - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; - - for (auto const& instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); - } - out_stream_ << "---------------------------" - << "\n\n"; - - return llvm::PreservedAnalyses::all(); - } - - /* - TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as - unused after compilation - */ - - static bool isRequired() - { - return true; - } - + explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); + + /// Constructors and destructors + /// @{ + COpsCounterPrinter() = delete; + COpsCounterPrinter(COpsCounterPrinter const&) = delete; + COpsCounterPrinter(COpsCounterPrinter&&) = default; + ~COpsCounterPrinter() = default; + /// @} + + /// Operators + /// @{ + COpsCounterPrinter& operator=(COpsCounterPrinter const&) = delete; + COpsCounterPrinter& operator=(COpsCounterPrinter&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} private: llvm::raw_ostream& out_stream_; }; - -auto GetOpsCounterPluginInfo() -> llvm::PassPluginLibraryInfo; From 06b09ed69b82e9dc2709fc97902fc22fd6360bda Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 15:04:05 +0200 Subject: [PATCH 021/106] Improving code quality --- src/QsPasses/libs/OpsCounter/OpsCounter.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp index b92c430ce5..0c6400f005 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp @@ -35,10 +35,9 @@ class COpsCounterAnalytics : public llvm::AnalysisInfoMixin { public: - explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); - /// Constructors and destructors /// @{ + explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); COpsCounterPrinter() = delete; COpsCounterPrinter(COpsCounterPrinter const&) = delete; COpsCounterPrinter(COpsCounterPrinter&&) = default; From fdb465a88911c0aed8975ccbc6a5a9f45bd12d2c Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 22 Jul 2021 15:20:57 +0200 Subject: [PATCH 022/106] Adding namespaces to passes --- src/QsPasses/libs/OpsCounter/OpsCounter.cpp | 100 +++++++++------- src/QsPasses/libs/OpsCounter/OpsCounter.hpp | 112 ++++++++++-------- .../templates/FunctionPass/{name}.cpp.tpl | 8 ++ .../templates/FunctionPass/{name}.hpp.tpl | 7 ++ 4 files changed, 129 insertions(+), 98 deletions(-) diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp index 293e584b4d..b2030f7c91 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp @@ -9,75 +9,83 @@ #include using namespace llvm; -COpsCounterAnalytics::Result COpsCounterAnalytics::run( - llvm::Function& function, - llvm::FunctionAnalysisManager& /*unused*/) +namespace Microsoft { - COpsCounterAnalytics::Result opcode_map; - for (auto& basic_block : function) +namespace Quantum +{ + COpsCounterAnalytics::Result COpsCounterAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) { - for (auto& 
instruction : basic_block) + COpsCounterAnalytics::Result opcode_map; + for (auto& basic_block : function) { - if (instruction.isDebugOrPseudoInst()) + for (auto& instruction : basic_block) { - continue; - } - auto name = instruction.getOpcodeName(); + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + auto name = instruction.getOpcodeName(); - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } } } + + return opcode_map; } - return opcode_map; -} + COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) + { + } -COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream& out_stream) - : out_stream_(out_stream) -{ -} + llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) + { + auto& opcode_map = fam.getResult(function); -llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) -{ - auto& opcode_map = fam.getResult(function); + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; + for (auto const& instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), 
instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; - for (auto const& instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + return llvm::PreservedAnalyses::all(); } - out_stream_ << "---------------------------" - << "\n\n"; - return llvm::PreservedAnalyses::all(); -} - -bool COpsCounterPrinter::isRequired() -{ - return true; -} + bool COpsCounterPrinter::isRequired() + { + return true; + } -llvm::AnalysisKey COpsCounterAnalytics::Key; + llvm::AnalysisKey COpsCounterAnalytics::Key; +} // namespace Quantum +} // namespace Microsoft // Interface to plugin namespace { llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { + using namespace Microsoft::Quantum; + return { LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp index 0c6400f005..0662766e59 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp @@ -4,57 +4,65 @@ #include "Llvm.hpp" -class COpsCounterAnalytics : public llvm::AnalysisInfoMixin +namespace Microsoft { - public: - using Result = llvm::StringMap; - - /// Constructors and destructors - /// @{ - COpsCounterAnalytics() = default; - COpsCounterAnalytics(COpsCounterAnalytics const&) = delete; - COpsCounterAnalytics(COpsCounterAnalytics&&) = default; - ~COpsCounterAnalytics() = default; - /// @} - - /// Operators - /// @{ - COpsCounterAnalytics& operator=(COpsCounterAnalytics const&) = delete; - COpsCounterAnalytics& operator=(COpsCounterAnalytics&&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); - /// @} - private: - static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; -}; - -class COpsCounterPrinter : public llvm::PassInfoMixin +namespace 
Quantum { - public: - /// Constructors and destructors - /// @{ - explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); - COpsCounterPrinter() = delete; - COpsCounterPrinter(COpsCounterPrinter const&) = delete; - COpsCounterPrinter(COpsCounterPrinter&&) = default; - ~COpsCounterPrinter() = default; - /// @} - - /// Operators - /// @{ - COpsCounterPrinter& operator=(COpsCounterPrinter const&) = delete; - COpsCounterPrinter& operator=(COpsCounterPrinter&&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - private: - llvm::raw_ostream& out_stream_; -}; + + class COpsCounterAnalytics : public llvm::AnalysisInfoMixin + { + public: + using Result = llvm::StringMap; + + /// Constructors and destructors + /// @{ + COpsCounterAnalytics() = default; + COpsCounterAnalytics(COpsCounterAnalytics const&) = delete; + COpsCounterAnalytics(COpsCounterAnalytics&&) = default; + ~COpsCounterAnalytics() = default; + /// @} + + /// Operators + /// @{ + COpsCounterAnalytics& operator=(COpsCounterAnalytics const&) = delete; + COpsCounterAnalytics& operator=(COpsCounterAnalytics&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} + private: + static llvm::AnalysisKey Key; + friend struct llvm::AnalysisInfoMixin; + }; + + class COpsCounterPrinter : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); + COpsCounterPrinter() = delete; + COpsCounterPrinter(COpsCounterPrinter const&) = delete; + COpsCounterPrinter(COpsCounterPrinter&&) = default; + ~COpsCounterPrinter() = default; + /// @} + + /// Operators + /// @{ + COpsCounterPrinter& operator=(COpsCounterPrinter const&) = delete; + COpsCounterPrinter& operator=(COpsCounterPrinter&&) = delete; + 
/// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + private: + llvm::raw_ostream& out_stream_; + }; + +} // namespace Quantum +} // namespace Microsoft diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl index feab1944fd..7cb82d1f3d 100644 --- a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl +++ b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl @@ -8,6 +8,10 @@ #include #include +namespace Microsoft +{ +namespace Quantum +{ llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &function, llvm::FunctionAnalysisManager &/*fam*/) { // Pass body @@ -21,12 +25,16 @@ bool C{name}Pass::isRequired() { return true; } +} // namespace Quantum +} // namespace Microsoft // Helper functions which we do not expose externally namespace { llvm::PassPluginLibraryInfo Get{name}PluginInfo() { + using namespace Microsoft::Quantum; using namespace llvm; + return { LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { // Registering the pass diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl index 39e9026be4..d413f43e24 100644 --- a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl +++ b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl @@ -4,6 +4,11 @@ #include "Llvm.hpp" +namespace Microsoft +{ +namespace Quantum +{ + class C{name}Pass : public llvm::PassInfoMixin { public: @@ -28,3 +33,5 @@ public: /// @} }; +} // namespace Quantum +} // namespace Microsoft From dd810afb797228af49594728ede8b4ab444ae2d4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 15:53:34 +0200 Subject: [PATCH 023/106] Adding comments to the source --- src/QsPasses/CMakeLists.txt | 30 ++++++----- src/QsPasses/manage | 10 ++-- src/QsPasses/site-packages/TasksCI/builder.py | 21 +++++++- src/QsPasses/site-packages/TasksCI/cli.py | 52 +++++++++++++++++-- .../site-packages/TasksCI/formatting.py | 52 ++++++++++++++----- .../site-packages/TasksCI/toolchain.py | 8 +++ 6 files changed, 138 insertions(+), 35 deletions(-) diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 40c9491c1f..49fb66942b 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -3,28 +3,22 @@ cmake_minimum_required(VERSION 3.4.3) project(QSharpPasses) find_package(LLVM REQUIRED CONFIG) +include(CheckCXXCompilerFlag) message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") -# Setting the standard for +# Setting the standard configuration for the C++ compiler +# Rather than allowing C++17, we restrict the ourselves to +# C++14 as this is the standard currently used by LLVM. While +# there is a very small change that the difference in standard +# would break things, it is a possibility nonetheless. set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") -# Needed for clang-tidy -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Compiler flags -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") - # LLVM is normally built without RTTI. Be consistent with that. 
if(NOT LLVM_ENABLE_RTTI) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") @@ -33,12 +27,22 @@ endif() # -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings # are triggered if llvm-tutor is built without this flag (though otherwise it # builds fine). For consistency, add it here too. -include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") endif() +# We export the compile commands which are needed by clang-tidy +# to run the static analysis +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +# Adding LLVM include directories. We may choose +# to move this to a module level at a later point +include_directories(${LLVM_INCLUDE_DIRS}) +link_directories(${LLVM_LIBRARY_DIRS}) +add_definitions(${LLVM_DEFINITIONS}) +include_directories(${CMAKE_SOURCE_DIR}/src) +# Adding the libraries add_subdirectory(libs) diff --git a/src/QsPasses/manage b/src/QsPasses/manage index ada5de795d..d49d9f2b6f 100755 --- a/src/QsPasses/manage +++ b/src/QsPasses/manage @@ -2,9 +2,11 @@ import os import sys +# Adding the site-packages directory to our Python path +# in order to access the TasksCI module ROOT = os.path.dirname(__file__) -sys.path.insert(0,os.path.join(ROOT, "site-packages")) +sys.path.insert(0, os.path.join(ROOT, "site-packages")) -from TasksCI.cli import cli - -cli() \ No newline at end of file +# Loading the CLI tool and running it +from TasksCI.cli import cli # noqa: E402 +cli() diff --git a/src/QsPasses/site-packages/TasksCI/builder.py b/src/QsPasses/site-packages/TasksCI/builder.py index 73d8f7fb87..bd6b574b04 100644 --- a/src/QsPasses/site-packages/TasksCI/builder.py +++ b/src/QsPasses/site-packages/TasksCI/builder.py @@ -3,6 +3,7 @@ import os from . import settings +from . 
import toolchain from .settings import PROJECT_ROOT import logging import subprocess @@ -12,6 +13,10 @@ def configure_cmake(build_dir: str, generator=None): + """ + Function that creates a build directory and runs + cmake to configure make, ninja or another generator. + """ logger.info("Source: {}".format(PROJECT_ROOT)) logger.info("Build : {}".format(build_dir)) @@ -19,7 +24,7 @@ def configure_cmake(build_dir: str, generator=None): os.chdir(PROJECT_ROOT) os.makedirs(build_dir, exist_ok=True) - cmake_cmd = ['cmake'] # TODO: get from toolchain + cmake_cmd = [toolchain.discover_cmake()] if generator is not None: cmake_cmd += ['-G', generator] @@ -33,6 +38,10 @@ def configure_cmake(build_dir: str, generator=None): def build_project(build_dir: str, generator=None, concurrency=None): + """ + Given a build directory, this function builds all targets using + a specified generator and concurrency. + """ if generator in ["make", None]: cmd = ["make"] @@ -52,7 +61,11 @@ def build_project(build_dir: str, generator=None, concurrency=None): def run_tests(build_dir: str, concurrency=None): - cmake_cmd = ['ctest'] # TODO: get from toolchain + """ + Runs the unit tests given a build directory. + """ + + cmake_cmd = [toolchain.discover_ctest()] if concurrency is not None: raise BaseException("No support for concurrent testing at the moment.") @@ -64,6 +77,10 @@ def run_tests(build_dir: str, concurrency=None): def main(build_dir: str, generator=None, test: bool = False): + """ + Runs the entire build process by first configuring, the building + and optionally testing the codebase. + """ configure_cmake(build_dir, generator) diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py index adeab1dea0..c3984130d7 100644 --- a/src/QsPasses/site-packages/TasksCI/cli.py +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
- from .formatting import main as style_check_main from .builder import main as builder_main from .linting import main as lint_main, clang_tidy_diagnose @@ -12,12 +11,13 @@ import os import re +# Important directories LIB_DIR = os.path.abspath(os.path.dirname((__file__))) TEMPLATE_DIR = os.path.join(LIB_DIR, "templates") SOURCE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(LIB_DIR))) -logger = logging.getLogger() # Logging configuration +logger = logging.getLogger() ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') @@ -31,6 +31,11 @@ @click.group() @click.option('--loglevel', default="error") def cli(loglevel): + """ + Implements the general CLI options such as logging level. + """ + + # Valid values levels = { "critical": 50, "error": 40, @@ -40,6 +45,7 @@ def cli(loglevel): "notset": 0 } + # Getting the logging level and updating loglevel = loglevel.lower() if loglevel not in levels: logger.critical("Invalid log level") @@ -52,8 +58,12 @@ def cli(loglevel): @cli.command() @click.option('--fix-issues', default=False, is_flag=True) def stylecheck(fix_issues): - logger.info("Invoking style checker") + """ + Command for checking the style and optionally fixing issues. + Note that some issues are not automatically fixed. + """ + logger.info("Invoking style checker") style_check_main(fix_issues) @@ -62,10 +72,20 @@ def stylecheck(fix_issues): @click.option('--fix-issues', default=False, is_flag=True) @click.option('--force', default=False, is_flag=True) def lint(diagnose, fix_issues, force): + """ + Command for linting the code. + """ + + # Helpful option in order to diagnose Clang tidy. if diagnose: clang_tidy_diagnose() + + # In case we are diagnosing, no run is performed. return + # Allowing Clang tidy to attempt to fix issues. 
Generally, + # it is discouraged to use this features as it may result in + # a catastrophy if fix_issues: if not force: print("""Fixing isssues using Clang Tidy will break your code. @@ -81,6 +101,7 @@ def lint(diagnose, fix_issues, force): print("Wrong answer - stopping!") exit(-1) + # Running the linter logger.info("Invoking linter") lint_main(fix_issues) @@ -89,6 +110,10 @@ def lint(diagnose, fix_issues, force): @click.option('--debug/--no-debug', default=True) @click.option('--generator', default=None) def test(debug, generator): + """ + Command to build and test the code base. + """ + logger.info("Building and testing") build_dir = "Debug" @@ -100,6 +125,11 @@ def test(debug, generator): @cli.command() def runci(): + """ + Command to run all CI commands, starting with style check + then linting and finally unit tests. + """ + build_dir = "Debug" style_check_main(False) @@ -116,13 +146,21 @@ def runci(): default=None, ) def create_pass(name, template): + """ + Helper command to create a new pass from a template. Templates + can be found in the template directory of the TasksCI tool. + """ + # Checking whether the target already exists target_dir = os.path.join(SOURCE_DIR, "libs", name) if os.path.exists(target_dir): logger.error("Pass '{}' already exists".format(name)) exit(-1) + # In case no template was specified, we list the option + # such that the user can choose one if template is None: + # Listing options options = [] print("Available templates:") @@ -135,6 +173,7 @@ def create_pass(name, template): pretty_template_name = re.sub(r'(? len(options) + 1: @@ -148,15 +187,22 @@ def create_pass(name, template): # Getting the template template = options[n - 1] + # Checking that the template is valid. Note that even though + # we list the templates above, the user may have specified an + # invalid template via the command line. 
template_dir = os.path.join(TEMPLATE_DIR, template) if not os.path.exists(template_dir): logger.error("Template does not exist") exit(-1) + # Creating an operation name by transforming the original name + # from "CamelCase" to "camel-case" operation_name = re.sub(r'(? Date: Thu, 22 Jul 2021 23:10:06 +0200 Subject: [PATCH 024/106] Small refactor --- src/{QsPasses => Passes}/.clang-format | 0 src/{QsPasses => Passes}/.clang-tidy | 0 src/{QsPasses => Passes}/CMakeLists.txt | 0 src/{QsPasses => Passes}/CONTRIBUTING.md | 0 src/{QsPasses => Passes}/Makefile | 0 src/{QsPasses => Passes}/README.md | 0 src/{QsPasses => Passes}/docs/index.md | 0 .../examples/ClassicalIrCommandline/Makefile | 0 .../examples/ClassicalIrCommandline/README.md | 0 .../classical-program.c | 0 src/{QsPasses => Passes}/include/Llvm.hpp | 0 src/{QsPasses => Passes}/libs/CMakeLists.txt | 0 .../libs/OpsCounter/OpsCounter.cpp | 0 .../libs/OpsCounter/OpsCounter.hpp | 0 src/{QsPasses => Passes}/manage | 0 src/{QsPasses => Passes}/requirements.txt | 0 .../site-packages/TasksCI/__main__.py | 0 .../site-packages/TasksCI/builder.py | 0 .../site-packages/TasksCI/cli.py | 0 .../site-packages/TasksCI/formatting.py | 0 .../site-packages/TasksCI/linting.py | 23 ++++++++++++++++--- .../site-packages/TasksCI/settings.py | 0 .../templates/FunctionPass/SPECIFICATION.md | 0 .../templates/FunctionPass/{name}.cpp.tpl | 0 .../templates/FunctionPass/{name}.hpp.tpl | 0 .../site-packages/TasksCI/toolchain.py | 0 26 files changed, 20 insertions(+), 3 deletions(-) rename src/{QsPasses => Passes}/.clang-format (100%) rename src/{QsPasses => Passes}/.clang-tidy (100%) rename src/{QsPasses => Passes}/CMakeLists.txt (100%) rename src/{QsPasses => Passes}/CONTRIBUTING.md (100%) rename src/{QsPasses => Passes}/Makefile (100%) rename src/{QsPasses => Passes}/README.md (100%) rename src/{QsPasses => Passes}/docs/index.md (100%) rename src/{QsPasses => Passes}/examples/ClassicalIrCommandline/Makefile (100%) rename src/{QsPasses => 
Passes}/examples/ClassicalIrCommandline/README.md (100%) rename src/{QsPasses => Passes}/examples/ClassicalIrCommandline/classical-program.c (100%) rename src/{QsPasses => Passes}/include/Llvm.hpp (100%) rename src/{QsPasses => Passes}/libs/CMakeLists.txt (100%) rename src/{QsPasses => Passes}/libs/OpsCounter/OpsCounter.cpp (100%) rename src/{QsPasses => Passes}/libs/OpsCounter/OpsCounter.hpp (100%) rename src/{QsPasses => Passes}/manage (100%) rename src/{QsPasses => Passes}/requirements.txt (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/__main__.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/builder.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/cli.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/formatting.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/linting.py (79%) rename src/{QsPasses => Passes}/site-packages/TasksCI/settings.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/toolchain.py (100%) diff --git a/src/QsPasses/.clang-format b/src/Passes/.clang-format similarity index 100% rename from src/QsPasses/.clang-format rename to src/Passes/.clang-format diff --git a/src/QsPasses/.clang-tidy b/src/Passes/.clang-tidy similarity index 100% rename from src/QsPasses/.clang-tidy rename to src/Passes/.clang-tidy diff --git a/src/QsPasses/CMakeLists.txt b/src/Passes/CMakeLists.txt similarity index 100% rename from src/QsPasses/CMakeLists.txt rename to src/Passes/CMakeLists.txt diff --git a/src/QsPasses/CONTRIBUTING.md b/src/Passes/CONTRIBUTING.md similarity index 100% rename from src/QsPasses/CONTRIBUTING.md rename to src/Passes/CONTRIBUTING.md diff --git 
a/src/QsPasses/Makefile b/src/Passes/Makefile similarity index 100% rename from src/QsPasses/Makefile rename to src/Passes/Makefile diff --git a/src/QsPasses/README.md b/src/Passes/README.md similarity index 100% rename from src/QsPasses/README.md rename to src/Passes/README.md diff --git a/src/QsPasses/docs/index.md b/src/Passes/docs/index.md similarity index 100% rename from src/QsPasses/docs/index.md rename to src/Passes/docs/index.md diff --git a/src/QsPasses/examples/ClassicalIrCommandline/Makefile b/src/Passes/examples/ClassicalIrCommandline/Makefile similarity index 100% rename from src/QsPasses/examples/ClassicalIrCommandline/Makefile rename to src/Passes/examples/ClassicalIrCommandline/Makefile diff --git a/src/QsPasses/examples/ClassicalIrCommandline/README.md b/src/Passes/examples/ClassicalIrCommandline/README.md similarity index 100% rename from src/QsPasses/examples/ClassicalIrCommandline/README.md rename to src/Passes/examples/ClassicalIrCommandline/README.md diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.c b/src/Passes/examples/ClassicalIrCommandline/classical-program.c similarity index 100% rename from src/QsPasses/examples/ClassicalIrCommandline/classical-program.c rename to src/Passes/examples/ClassicalIrCommandline/classical-program.c diff --git a/src/QsPasses/include/Llvm.hpp b/src/Passes/include/Llvm.hpp similarity index 100% rename from src/QsPasses/include/Llvm.hpp rename to src/Passes/include/Llvm.hpp diff --git a/src/QsPasses/libs/CMakeLists.txt b/src/Passes/libs/CMakeLists.txt similarity index 100% rename from src/QsPasses/libs/CMakeLists.txt rename to src/Passes/libs/CMakeLists.txt diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp b/src/Passes/libs/OpsCounter/OpsCounter.cpp similarity index 100% rename from src/QsPasses/libs/OpsCounter/OpsCounter.cpp rename to src/Passes/libs/OpsCounter/OpsCounter.cpp diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp b/src/Passes/libs/OpsCounter/OpsCounter.hpp 
similarity index 100% rename from src/QsPasses/libs/OpsCounter/OpsCounter.hpp rename to src/Passes/libs/OpsCounter/OpsCounter.hpp diff --git a/src/QsPasses/manage b/src/Passes/manage similarity index 100% rename from src/QsPasses/manage rename to src/Passes/manage diff --git a/src/QsPasses/requirements.txt b/src/Passes/requirements.txt similarity index 100% rename from src/QsPasses/requirements.txt rename to src/Passes/requirements.txt diff --git a/src/QsPasses/site-packages/TasksCI/__main__.py b/src/Passes/site-packages/TasksCI/__main__.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/__main__.py rename to src/Passes/site-packages/TasksCI/__main__.py diff --git a/src/QsPasses/site-packages/TasksCI/builder.py b/src/Passes/site-packages/TasksCI/builder.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/builder.py rename to src/Passes/site-packages/TasksCI/builder.py diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/Passes/site-packages/TasksCI/cli.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/cli.py rename to src/Passes/site-packages/TasksCI/cli.py diff --git a/src/QsPasses/site-packages/TasksCI/formatting.py b/src/Passes/site-packages/TasksCI/formatting.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/formatting.py rename to src/Passes/site-packages/TasksCI/formatting.py diff --git a/src/QsPasses/site-packages/TasksCI/linting.py b/src/Passes/site-packages/TasksCI/linting.py similarity index 79% rename from src/QsPasses/site-packages/TasksCI/linting.py rename to src/Passes/site-packages/TasksCI/linting.py index 4a692ab302..659be918e6 100644 --- a/src/QsPasses/site-packages/TasksCI/linting.py +++ b/src/Passes/site-packages/TasksCI/linting.py @@ -13,12 +13,19 @@ def clang_tidy_diagnose(): + """ + Helper function to print the configuration of Clang tidy + """ + + # Getting the config config = subprocess.check_output( [toolchain.discover_tidy(), '-dump-config'], 
cwd=PROJECT_ROOT).decode() + # Getting the list of checks check_list = subprocess.check_output( [toolchain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() + # Printing it all to the user checks = [x.strip() for x in check_list.split("\n") if '-' in x] print("Working directory: {}".format(PROJECT_ROOT)) @@ -26,11 +33,18 @@ def clang_tidy_diagnose(): print(config) print("") print("Clang tidy checks:") + for check in sorted(checks): print(" -", check) -def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): +def run_clang_tidy(build_dir, filename, fix_issues: bool = False): + """ + Function that runs Clang tidy for a single file given a build directory + and a filename. + """ + + # Configuring the command line arguments clang_tidy_binary = toolchain.discover_tidy() cmd = [clang_tidy_binary] @@ -46,6 +60,7 @@ def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): cmd.append(filename) + # Getting the output p = subprocess.Popen( cmd, stdout=subprocess.PIPE, @@ -59,8 +74,10 @@ def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): output = output.decode() err = err.decode() + # The return value is negative even if the user code is without + # errors, so we check whether there are any errors specified in + # error output if "error" in err: - # TODO(TFR): write output and errors to temp log file sys.stderr.write(output) sys.stderr.write(err) @@ -101,7 +118,7 @@ def main_cpp(fix_issues: bool): success = True for filename in files_to_analyse: - success = success and run_clang_tidy(source_dir, build_dir, filename, fix_issues=fix_issues) + success = success and run_clang_tidy(build_dir, filename, fix_issues=fix_issues) return success diff --git a/src/QsPasses/site-packages/TasksCI/settings.py b/src/Passes/site-packages/TasksCI/settings.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/settings.py rename to src/Passes/site-packages/TasksCI/settings.py diff --git 
a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md b/src/Passes/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md similarity index 100% rename from src/QsPasses/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md rename to src/Passes/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl similarity index 100% rename from src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl rename to src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl similarity index 100% rename from src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl rename to src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl diff --git a/src/QsPasses/site-packages/TasksCI/toolchain.py b/src/Passes/site-packages/TasksCI/toolchain.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/toolchain.py rename to src/Passes/site-packages/TasksCI/toolchain.py From a23b6b716588a80fd229fb4723c1a60f851f3a16 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 23 Jul 2021 11:08:52 +0200 Subject: [PATCH 025/106] Adding QIR example using opt for optimisation and refactoring library structure --- .../examples/ClassicalIrCommandline/Makefile | 6 + .../classical-program.ll | 123 ++++++++++++++ .../examples/OptimisationUsingOpt/README.md | 58 +++++++ .../SimpleExample/Makefile | 5 + .../SimpleExample/SimpleExample.csproj | 9 + .../SimpleExample/SimpleExample.qs | 10 ++ src/Passes/libs/CMakeLists.txt | 7 +- src/Passes/libs/OpsCounter/LibOpsCounter.cpp | 47 ++++++ src/Passes/libs/OpsCounter/OpsCounter.cpp | 155 +++++++----------- src/Passes/libs/OpsCounter/OpsCounter.hpp | 106 ++++++------ src/Passes/libs/OpsCounter/SPECIFICATION.md | 0 .../templates/FunctionPass/Lib{name}.cpp.tpl | 38 +++++ .../templates/FunctionPass/{name}.cpp.tpl | 31 +--- 13 files changed, 411 insertions(+), 184 deletions(-) create mode 100644 src/Passes/examples/ClassicalIrCommandline/classical-program.ll create mode 100644 src/Passes/examples/OptimisationUsingOpt/README.md create mode 100644 src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile create mode 100644 src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.csproj create mode 100644 src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.qs create mode 100644 src/Passes/libs/OpsCounter/LibOpsCounter.cpp create mode 100644 src/Passes/libs/OpsCounter/SPECIFICATION.md create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl diff --git a/src/Passes/examples/ClassicalIrCommandline/Makefile b/src/Passes/examples/ClassicalIrCommandline/Makefile index f50340c5a5..2f39e8c4a4 100644 --- a/src/Passes/examples/ClassicalIrCommandline/Makefile +++ b/src/Passes/examples/ClassicalIrCommandline/Makefile @@ -1,3 +1,9 @@ +emit-llvm-cpp: + clang -O3 -S -std=c++17 -emit-llvm classical-program.cpp -o classical-program.ll + +emit-llvm-cpp-bin: + clang++ -O3 -std=c++17 -stdlib=libc++ classical-program.cpp -o a.out + emit-llvm: 
clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll diff --git a/src/Passes/examples/ClassicalIrCommandline/classical-program.ll b/src/Passes/examples/ClassicalIrCommandline/classical-program.ll new file mode 100644 index 0000000000..5ad71d9d0b --- /dev/null +++ b/src/Passes/examples/ClassicalIrCommandline/classical-program.ll @@ -0,0 +1,123 @@ +; ModuleID = 'classical-program.cpp' +source_filename = "classical-program.cpp" +target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx11.0.0" + +%"class.std::__1::basic_ostream" = type { i32 (...)**, %"class.std::__1::basic_ios.base" } +%"class.std::__1::basic_ios.base" = type <{ %"class.std::__1::ios_base", %"class.std::__1::basic_ostream"*, i32 }> +%"class.std::__1::ios_base" = type { i32 (...)**, i32, i64, i64, i32, i32, i8*, i8*, void (i32, %"class.std::__1::ios_base"*, i32)**, i32*, i64, i64, i64*, i64, i64, i8**, i64, i64 } +%"class.std::__1::locale::id" = type <{ %"struct.std::__1::once_flag", i32, [4 x i8] }> +%"struct.std::__1::once_flag" = type { i64 } +%"class.std::__1::locale" = type { %"class.std::__1::locale::__imp"* } +%"class.std::__1::locale::__imp" = type opaque +%"class.std::__1::locale::facet" = type { %"class.std::__1::__shared_count" } +%"class.std::__1::__shared_count" = type { i32 (...)**, i64 } +%"class.std::__1::ctype" = type <{ %"class.std::__1::locale::facet", i32*, i8, [7 x i8] }> + +@_ZNSt3__14coutE = external global %"class.std::__1::basic_ostream", align 8 +@_ZNSt3__15ctypeIcE2idE = external global %"class.std::__1::locale::id", align 8 + +; Function Attrs: norecurse ssp uwtable mustprogress +define dso_local i32 @main() local_unnamed_addr #0 personality i32 (...)* @__gxx_personality_v0 { + %1 = alloca %"class.std::__1::locale", align 8 + %2 = tail call i32 @_Z9fibonaccii(i32 3) + %3 = tail call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* 
@_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEi(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) @_ZNSt3__14coutE, i32 %2) + %4 = bitcast %"class.std::__1::basic_ostream"* %3 to i8** + %5 = load i8*, i8** %4, align 8, !tbaa !3 + %6 = getelementptr i8, i8* %5, i64 -24 + %7 = bitcast i8* %6 to i64* + %8 = load i64, i64* %7, align 8 + %9 = bitcast %"class.std::__1::basic_ostream"* %3 to i8* + %10 = getelementptr inbounds i8, i8* %9, i64 %8 + %11 = bitcast %"class.std::__1::locale"* %1 to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %11) #5 + %12 = bitcast i8* %10 to %"class.std::__1::ios_base"* + call void @_ZNKSt3__18ios_base6getlocEv(%"class.std::__1::locale"* nonnull sret(%"class.std::__1::locale") align 8 %1, %"class.std::__1::ios_base"* nonnull dereferenceable(136) %12) + %13 = invoke %"class.std::__1::locale::facet"* @_ZNKSt3__16locale9use_facetERNS0_2idE(%"class.std::__1::locale"* nonnull dereferenceable(8) %1, %"class.std::__1::locale::id"* nonnull align 8 dereferenceable(12) @_ZNSt3__15ctypeIcE2idE) + to label %14 unwind label %21 + +14: ; preds = %0 + %15 = bitcast %"class.std::__1::locale::facet"* %13 to %"class.std::__1::ctype"* + %16 = bitcast %"class.std::__1::locale::facet"* %13 to i8 (%"class.std::__1::ctype"*, i8)*** + %17 = load i8 (%"class.std::__1::ctype"*, i8)**, i8 (%"class.std::__1::ctype"*, i8)*** %16, align 8, !tbaa !3 + %18 = getelementptr inbounds i8 (%"class.std::__1::ctype"*, i8)*, i8 (%"class.std::__1::ctype"*, i8)** %17, i64 7 + %19 = load i8 (%"class.std::__1::ctype"*, i8)*, i8 (%"class.std::__1::ctype"*, i8)** %18, align 8 + %20 = invoke signext i8 %19(%"class.std::__1::ctype"* nonnull dereferenceable(25) %15, i8 signext 10) + to label %23 unwind label %21 + +21: ; preds = %14, %0 + %22 = landingpad { i8*, i32 } + cleanup + call void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8) %1) #5 + call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %11) #5 + resume { i8*, i32 } %22 
+ +23: ; preds = %14 + call void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8) %1) #5 + call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %11) #5 + %24 = call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE3putEc(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) %3, i8 signext %20) + %25 = call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5flushEv(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) %3) + ret i32 0 +} + +declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEi(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8), i32) local_unnamed_addr #1 + +; Function Attrs: ssp uwtable mustprogress +define linkonce_odr i32 @_Z9fibonaccii(i32 %0) local_unnamed_addr #2 { + %2 = icmp slt i32 %0, 2 + br i1 %2, label %13, label %3 + +3: ; preds = %1, %3 + %4 = phi i32 [ %8, %3 ], [ %0, %1 ] + %5 = phi i32 [ %9, %3 ], [ 0, %1 ] + %6 = add nsw i32 %4, -1 + %7 = tail call i32 @_Z9fibonaccii(i32 %6) + %8 = add nsw i32 %4, -2 + %9 = add nsw i32 %7, %5 + %10 = icmp slt i32 %4, 4 + br i1 %10, label %11, label %3 + +11: ; preds = %3 + %12 = add i32 %9, 1 + br label %13 + +13: ; preds = %11, %1 + %14 = phi i32 [ 1, %1 ], [ %12, %11 ] + ret i32 %14 +} + +declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE3putEc(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8), i8 signext) local_unnamed_addr #1 + +declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5flushEv(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8)) local_unnamed_addr #1 + +; Function Attrs: argmemonly nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 
immarg, i8* nocapture) #3 + +declare void @_ZNKSt3__18ios_base6getlocEv(%"class.std::__1::locale"* sret(%"class.std::__1::locale") align 8, %"class.std::__1::ios_base"* nonnull dereferenceable(136)) local_unnamed_addr #1 + +declare i32 @__gxx_personality_v0(...) + +; Function Attrs: nounwind +declare void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8)) unnamed_addr #4 + +; Function Attrs: argmemonly nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #3 + +declare %"class.std::__1::locale::facet"* @_ZNKSt3__16locale9use_facetERNS0_2idE(%"class.std::__1::locale"* nonnull dereferenceable(8), %"class.std::__1::locale::id"* nonnull align 8 dereferenceable(12)) local_unnamed_addr #1 + +attributes #0 = { norecurse ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #2 = { ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" 
"stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #3 = { argmemonly nofree nosync nounwind willreturn } +attributes #4 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #5 = { nounwind } + +!llvm.module.flags = !{!0, !1} +!llvm.ident = !{!2} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{!"Homebrew clang version 12.0.1"} +!3 = !{!4, !4, i64 0} +!4 = !{!"vtable pointer", !5, i64 0} +!5 = !{!"Simple C++ TBAA"} diff --git a/src/Passes/examples/OptimisationUsingOpt/README.md b/src/Passes/examples/OptimisationUsingOpt/README.md new file mode 100644 index 0000000000..741c555ea3 --- /dev/null +++ b/src/Passes/examples/OptimisationUsingOpt/README.md @@ -0,0 +1,58 @@ +# Optimisation Using Opt + +In this document, we give a brief introduction on how to perform IR optimisations +using `opt`. + +## Stripping dead code + +We start out by considering a simple case of a program that just returns 0: + +```qsharp +namespace Example { + @EntryPoint() + operation OurAwesomeQuantumProgram(nQubits : Int) : Int { + + return 0; + } +} +``` + +You find the code for this in the folder `SimpleExample`. To generate a QIR for this code, go to the folder and run + +```sh +% cd SimpleExample/ +% dotnet clean SimpleExample.csproj +(...) +% dotnet build SimpleExample.csproj -c Debug +``` + +If everything went well, you should now have a subdirectory called `qir` and inside `qir`, you will find `SimpleExample.ll`. 
Depending on the version of Q#, +the generated QIR will vary, but in general, it will be relatively long. Looking at this file, you will see +that the total length is a little above 2000 lines of code. That is pretty extensive for a program which essentially +does nothing so obviously, most of the generated QIR must be dead code. We can now use `opt` to get rid of the dead code and we do this by invoking: + +```sh +opt -S qir/SimpleExample.ll -O3 > qir/SimpleExample-O3.ll +``` + +All going well, this should reduce your QIR to + +```language +; Function Attrs: norecurse nounwind readnone willreturn +define i64 @Example__QuantumFunction__Interop(i64 %nQubits) local_unnamed_addr #0 { +entry: + ret i64 0 +} + +define void @Example__QuantumFunction(i64 %nQubits) local_unnamed_addr #1 { +entry: + %0 = tail call %String* @__quantum__rt__int_to_string(i64 0) + tail call void @__quantum__rt__message(%String* %0) + tail call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} +``` + +plus a few extra delcarations. 
+ +## Applying a pass diff --git a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile new file mode 100644 index 0000000000..0de5d94b56 --- /dev/null +++ b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile @@ -0,0 +1,5 @@ +clean: + rm -rf bin + rm -rf obj + rm -rf qir + \ No newline at end of file diff --git a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.csproj b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.csproj new file mode 100644 index 0000000000..eeab572589 --- /dev/null +++ b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.csproj @@ -0,0 +1,9 @@ + + + + Exe + netcoreapp3.1 + true + + + diff --git a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.qs b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.qs new file mode 100644 index 0000000000..5578530c60 --- /dev/null +++ b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.qs @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +namespace Example { + @EntryPoint() + operation QuantumFunction(nQubits : Int) : Int { + + return 0; + } +} diff --git a/src/Passes/libs/CMakeLists.txt b/src/Passes/libs/CMakeLists.txt index 578a55e711..700281c6a6 100644 --- a/src/Passes/libs/CMakeLists.txt +++ b/src/Passes/libs/CMakeLists.txt @@ -10,9 +10,9 @@ macro(list_qs_passes result) set(${result} ${dirlist}) endmacro() -list_qs_passes(QS_PASSES) +list_qs_passes(ALL_PASSES) -foreach(pass_plugin ${QS_PASSES}) +foreach(pass_plugin ${ALL_PASSES}) # Getting sources file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/${pass_plugin}/*.cpp) @@ -41,3 +41,6 @@ foreach(pass_plugin ${QS_PASSES}) "$<$:-undefined dynamic_lookup>") endforeach() + + +# add_library(passes SHARED ${ALL_PASSES}) \ No newline at end of file diff --git a/src/Passes/libs/OpsCounter/LibOpsCounter.cpp b/src/Passes/libs/OpsCounter/LibOpsCounter.cpp new file mode 100644 index 0000000000..8197658a25 --- /dev/null +++ b/src/Passes/libs/OpsCounter/LibOpsCounter.cpp @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm.hpp" +#include "OpsCounter/OpsCounter.hpp" + +#include +#include + +namespace { +// Interface to plugin +llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() +{ + using namespace Microsoft::Quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the printer + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "print") + { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + fam.registerPass([] { return COpsCounterAnalytics(); }); + }); + }}; +} + +} // namespace + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return GetOpsCounterPluginInfo(); +} diff --git a/src/Passes/libs/OpsCounter/OpsCounter.cpp b/src/Passes/libs/OpsCounter/OpsCounter.cpp index b2030f7c91..b2df78f4b8 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.cpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.cpp @@ -7,113 +7,72 @@ #include #include -using namespace llvm; -namespace Microsoft +namespace Microsoft { +namespace Quantum { +COpsCounterAnalytics::Result COpsCounterAnalytics::run(llvm::Function &function, + llvm::FunctionAnalysisManager & /*unused*/) { -namespace Quantum -{ - COpsCounterAnalytics::Result COpsCounterAnalytics::run( - llvm::Function& function, - llvm::FunctionAnalysisManager& /*unused*/) - { - COpsCounterAnalytics::Result opcode_map; - for (auto& basic_block : function) - { - for (auto& instruction : basic_block) - { - if (instruction.isDebugOrPseudoInst()) - { - continue; - } - auto name = instruction.getOpcodeName(); - - if 
(opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; - } - } - } - - return opcode_map; - } - - COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream& out_stream) - : out_stream_(out_stream) + COpsCounterAnalytics::Result opcode_map; + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) { + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } } + } - llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) - { - auto& opcode_map = fam.getResult(function); - - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; - - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; - - for (auto const& instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); - } - out_stream_ << "---------------------------" - << "\n\n"; - - return llvm::PreservedAnalyses::all(); - } - - bool COpsCounterPrinter::isRequired() - { - return true; - } + return opcode_map; +} - llvm::AnalysisKey COpsCounterAnalytics::Key; -} // namespace Quantum -} // namespace Microsoft +COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream &out_stream) + : out_stream_(out_stream) +{} -// Interface to plugin -namespace -{ -llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() +llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) { - using namespace Microsoft::Quantum; - - return { - 
LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, - [](PassBuilder& pb) - { - // Registering the printer - pb.registerPipelineParsingCallback( - [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) - { - if (name == "print") - { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) - { fpm.addPass(COpsCounterPrinter(llvm::errs())); }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) - { fam.registerPass([] { return COpsCounterAnalytics(); }); }); - }}; + auto &opcode_map = fam.getResult(function); + + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; + + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; + + for (auto const &instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), + instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; + + return llvm::PreservedAnalyses::all(); } -} // namespace -extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +bool COpsCounterPrinter::isRequired() { - return GetOpsCounterPluginInfo(); + return true; } + +llvm::AnalysisKey COpsCounterAnalytics::Key; + +} // namespace Quantum +} // namespace Microsoft diff --git a/src/Passes/libs/OpsCounter/OpsCounter.hpp b/src/Passes/libs/OpsCounter/OpsCounter.hpp index 0662766e59..4978b10725 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.hpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.hpp @@ -4,65 +4,63 @@ #include "Llvm.hpp" -namespace Microsoft -{ -namespace Quantum -{ +namespace Microsoft { +namespace Quantum { - class 
COpsCounterAnalytics : public llvm::AnalysisInfoMixin - { - public: - using Result = llvm::StringMap; +class COpsCounterAnalytics : public llvm::AnalysisInfoMixin +{ +public: + using Result = llvm::StringMap; - /// Constructors and destructors - /// @{ - COpsCounterAnalytics() = default; - COpsCounterAnalytics(COpsCounterAnalytics const&) = delete; - COpsCounterAnalytics(COpsCounterAnalytics&&) = default; - ~COpsCounterAnalytics() = default; - /// @} + /// Constructors and destructors + /// @{ + COpsCounterAnalytics() = default; + COpsCounterAnalytics(COpsCounterAnalytics const &) = delete; + COpsCounterAnalytics(COpsCounterAnalytics &&) = default; + ~COpsCounterAnalytics() = default; + /// @} - /// Operators - /// @{ - COpsCounterAnalytics& operator=(COpsCounterAnalytics const&) = delete; - COpsCounterAnalytics& operator=(COpsCounterAnalytics&&) = delete; - /// @} + /// Operators + /// @{ + COpsCounterAnalytics &operator=(COpsCounterAnalytics const &) = delete; + COpsCounterAnalytics &operator=(COpsCounterAnalytics &&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); - /// @} - private: - static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; - }; + /// Functions required by LLVM + /// @{ + Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); + /// @} +private: + static llvm::AnalysisKey Key; + friend struct llvm::AnalysisInfoMixin; +}; - class COpsCounterPrinter : public llvm::PassInfoMixin - { - public: - /// Constructors and destructors - /// @{ - explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); - COpsCounterPrinter() = delete; - COpsCounterPrinter(COpsCounterPrinter const&) = delete; - COpsCounterPrinter(COpsCounterPrinter&&) = default; - ~COpsCounterPrinter() = default; - /// @} +class COpsCounterPrinter : public llvm::PassInfoMixin +{ +public: + /// Constructors and destructors + /// @{ + explicit 
COpsCounterPrinter(llvm::raw_ostream &out_stream); + COpsCounterPrinter() = delete; + COpsCounterPrinter(COpsCounterPrinter const &) = delete; + COpsCounterPrinter(COpsCounterPrinter &&) = default; + ~COpsCounterPrinter() = default; + /// @} - /// Operators - /// @{ - COpsCounterPrinter& operator=(COpsCounterPrinter const&) = delete; - COpsCounterPrinter& operator=(COpsCounterPrinter&&) = delete; - /// @} + /// Operators + /// @{ + COpsCounterPrinter &operator=(COpsCounterPrinter const &) = delete; + COpsCounterPrinter &operator=(COpsCounterPrinter &&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - private: - llvm::raw_ostream& out_stream_; - }; + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} +private: + llvm::raw_ostream &out_stream_; +}; -} // namespace Quantum -} // namespace Microsoft +} // namespace Quantum +} // namespace Microsoft diff --git a/src/Passes/libs/OpsCounter/SPECIFICATION.md b/src/Passes/libs/OpsCounter/SPECIFICATION.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl new file mode 100644 index 0000000000..6ed3455886 --- /dev/null +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "{name}/{name}.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace { +llvm::PassPluginLibraryInfo Get{name}PluginInfo() +{ + using namespace Microsoft::Quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "{operation_name}") + { + fpm.addPass(C{name}Pass()); + return true; + } + + return false; + }); + }}; +} +} + +// Interface for loading the plugin +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return Get{name}PluginInfo(); +} diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl index 7cb82d1f3d..c76a6ef22a 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl @@ -25,36 +25,7 @@ bool C{name}Pass::isRequired() { return true; } + } // namespace Quantum } // namespace Microsoft -// Helper functions which we do not expose externally -namespace { -llvm::PassPluginLibraryInfo Get{name}PluginInfo() -{ - using namespace Microsoft::Quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the pass - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "{operation_name}") - { - fpm.addPass(C{name}Pass()); - return true; - } - - return false; - }); - }}; -} -} - - -// Interface for loading the plugin -extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() -{ - return Get{name}PluginInfo(); -} From 327ff7f326830f386876eae378718f21761d6c5b Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 23 Jul 2021 11:11:29 +0200 Subject: [PATCH 026/106] Adding documentation --- src/Passes/site-packages/TasksCI/linting.py | 5 +++++ src/Passes/site-packages/TasksCI/settings.py | 4 ++++ src/Passes/site-packages/TasksCI/toolchain.py | 12 ++++++++++++ 3 files changed, 21 insertions(+) diff --git a/src/Passes/site-packages/TasksCI/linting.py b/src/Passes/site-packages/TasksCI/linting.py index 659be918e6..09b3647df4 100644 --- a/src/Passes/site-packages/TasksCI/linting.py +++ b/src/Passes/site-packages/TasksCI/linting.py @@ -89,6 +89,11 @@ def run_clang_tidy(build_dir, filename, fix_issues: bool = False): def main_cpp(fix_issues: bool): + """ + Main function for C++ linting. This function builds and lints + the code. + """ + logger.info("Linting") build_dir = os.path.join(PROJECT_ROOT, "Debug") source_dir = os.path.join(PROJECT_ROOT, "src") diff --git a/src/Passes/site-packages/TasksCI/settings.py b/src/Passes/site-packages/TasksCI/settings.py index 85d5b667f2..6db2e1fbf4 100644 --- a/src/Passes/site-packages/TasksCI/settings.py +++ b/src/Passes/site-packages/TasksCI/settings.py @@ -10,4 +10,8 @@ def get_concurrency(): + """ + Function that gives a default concurrency for the compilation + and testing process. 
+ """ return min(MAX_CONCURRENCY, multiprocessing.cpu_count()) diff --git a/src/Passes/site-packages/TasksCI/toolchain.py b/src/Passes/site-packages/TasksCI/toolchain.py index 60ceb80b4a..52757ff349 100644 --- a/src/Passes/site-packages/TasksCI/toolchain.py +++ b/src/Passes/site-packages/TasksCI/toolchain.py @@ -5,16 +5,28 @@ def discover_formatter(): + """ + Finds the clang-format executable + """ return shutil.which("clang-format") def discover_tidy(): + """ + Finds the clang-tidy executable + """ return shutil.which("clang-tidy") def discover_cmake(): + """ + Finds the cmake executable + """ return shutil.which("cmake") def discover_ctest(): + """ + Finds the ctest executable + """ return shutil.which("ctest") From 0ee8249bca9b3181176836c20bf5a46c81a7ca4d Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Fri, 23 Jul 2021 12:49:32 +0200 Subject: [PATCH 027/106] Updating linter and formatter --- src/Passes/.clang-format | 13 +- src/Passes/.clang-tidy | 40 +++++- src/Passes/libs/OpsCounter/LibOpsCounter.cpp | 63 +++++----- src/Passes/libs/OpsCounter/OpsCounter.cpp | 116 +++++++++--------- src/Passes/libs/OpsCounter/OpsCounter.hpp | 107 ++++++++-------- src/Passes/site-packages/TasksCI/linting.py | 17 +-- .../templates/FunctionPass/Lib{name}.cpp.tpl | 10 +- .../templates/FunctionPass/{name}.cpp.tpl | 12 +- .../templates/FunctionPass/{name}.hpp.tpl | 22 ++-- 9 files changed, 224 insertions(+), 176 deletions(-) diff --git a/src/Passes/.clang-format b/src/Passes/.clang-format index 329e8f956d..379a41ff3c 100644 --- a/src/Passes/.clang-format +++ b/src/Passes/.clang-format @@ -36,13 +36,20 @@ SpaceBeforeParens: ControlStatements DerivePointerAlignment: false PointerAlignment: Left -# Suggestion +# Suggestions Standard: Cpp11 AlignConsecutiveAssignments: true AlignConsecutiveDeclarations: true AlignTrailingComments: true +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 2 + +IndentCaseLabels: false +# NamespaceIndentation: 
None + # Ensures include compleness +IncludeBlocks: Regroup IncludeCategories: - Regex: '.*\.\..*' Priority: 1 @@ -57,4 +64,6 @@ IncludeCategories: - Regex: '.*' Priority: 2 IncludeIsMainRegex: '' - +SortIncludes: true +SortUsingDeclarations: true +SpaceInEmptyParentheses: false diff --git a/src/Passes/.clang-tidy b/src/Passes/.clang-tidy index d1f58c04c2..dfec20a924 100644 --- a/src/Passes/.clang-tidy +++ b/src/Passes/.clang-tidy @@ -26,23 +26,51 @@ HeaderFilterRegex: '.*' CheckOptions: - key: readability-identifier-naming.ClassCase value: 'CamelCase' - - key: readability-identifier-naming.ClassPrefix - value: 'C' - key: readability-identifier-naming.AbstractClassPrefix value: 'I' - key: readability-identifier-naming.StructCase value: 'CamelCase' - key: readability-identifier-naming.ParameterCase - value: 'camelBack' + value: 'lower_case' - key: readability-identifier-naming.PrivateMemberCase - value: 'camelBack' + value: 'lower_case' + - key: readability-identifier-naming.PrivateMemberSuffix + value: '_' + - key: readability-identifier-naming.ProtectedMemberCase + value: 'lower_case' + - key: readability-identifier-naming.ProtectedMemberSuffix + value: '_' + - key: readability-identifier-naming.VariableCase + value: 'lower_case' - key: readability-identifier-naming.LocalVariableCase - value: 'camelBack' + value: 'lower_case' - key: readability-identifier-naming.TypeAliasCase value: 'CamelCase' - key: readability-identifier-naming.UnionCase value: 'CamelCase' - key: readability-identifier-naming.FunctionCase - value: 'CamelCase' + value: 'camelBack' - key: readability-identifier-naming.NamespaceCase + value: 'lower_case' + - key: readability-identifier-naming.GlobalConstantCase + value: 'UPPER_CASE' + - key: readability-identifier-naming.EnumCase + value: 'CamelCase' + - key: readability-identifier-naming.EnumConstantCase + value: 'CamelCase' + - key: readability-identifier-naming.GlobalConstantPrefix + value: 'G_' + - key: 
readability-identifier-naming.ConstantCase + value: 'UPPER_CASE' + - key: readability-identifier-naming.MacroDefinitionCase + value: 'UPPER_CASE' + - key: readability-identifier-naming.TypeAliasCase + value: 'CamelCase' + - key: readability-identifier-naming.TypedefCase value: 'CamelCase' + - key: readability-identifier-naming.IgnoreMainLikeFunctions + value: true + - key: readability-identifier-naming.StaticVariableCase + value: 'lower_case' + - key: readability-identifier-naming.StaticVariablePrefix + value: 'h_' diff --git a/src/Passes/libs/OpsCounter/LibOpsCounter.cpp b/src/Passes/libs/OpsCounter/LibOpsCounter.cpp index 8197658a25..65a7a238b9 100644 --- a/src/Passes/libs/OpsCounter/LibOpsCounter.cpp +++ b/src/Passes/libs/OpsCounter/LibOpsCounter.cpp @@ -2,46 +2,49 @@ // Licensed under the MIT License. #include "Llvm.hpp" + #include "OpsCounter/OpsCounter.hpp" #include #include -namespace { +namespace +{ // Interface to plugin -llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() +llvm::PassPluginLibraryInfo getOpsCounterPluginInfo() { - using namespace Microsoft::Quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the printer - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "print") - { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - fam.registerPass([] { return COpsCounterAnalytics(); }); - }); - }}; + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, + [](PassBuilder& pb) + 
{ + // Registering the printer + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) + { + if (name == "print") + { + fpm.addPass(OpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) + { fpm.addPass(OpsCounterPrinter(llvm::errs())); }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) + { fam.registerPass([] { return OpsCounterAnalytics(); }); }); + }}; } -} // namespace +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return GetOpsCounterPluginInfo(); + return getOpsCounterPluginInfo(); } diff --git a/src/Passes/libs/OpsCounter/OpsCounter.cpp b/src/Passes/libs/OpsCounter/OpsCounter.cpp index b2df78f4b8..f642970b1a 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.cpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.cpp @@ -1,78 +1,80 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "OpsCounter/OpsCounter.hpp" - #include "Llvm.hpp" +#include "OpsCounter/OpsCounter.hpp" + #include #include -namespace Microsoft { -namespace Quantum { -COpsCounterAnalytics::Result COpsCounterAnalytics::run(llvm::Function &function, - llvm::FunctionAnalysisManager & /*unused*/) +namespace microsoft { - COpsCounterAnalytics::Result opcode_map; - for (auto &basic_block : function) - { - for (auto &instruction : basic_block) +namespace quantum +{ + OpsCounterAnalytics::Result OpsCounterAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) { - if (instruction.isDebugOrPseudoInst()) - { - continue; - } - auto name = instruction.getOpcodeName(); + OpsCounterAnalytics::Result opcode_map; + for (auto& basic_block : function) + { + for (auto& instruction : basic_block) + { + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + auto name = instruction.getOpcodeName(); - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; - } - } - } + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } + } + } - return opcode_map; -} + return opcode_map; + } -COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream &out_stream) - : out_stream_(out_stream) -{} + OpsCounterPrinter::OpsCounterPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) + { + } -llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) -{ - auto &opcode_map = fam.getResult(function); + llvm::PreservedAnalyses OpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) + { + auto& opcode_map = fam.getResult(function); - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; + out_stream_ << "Stats for '" << 
function.getName() << "'\n"; + out_stream_ << "===========================\n"; - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; + constexpr auto STR1 = "Opcode"; + constexpr auto STR2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", STR1, STR2); + out_stream_ << "---------------------------" + << "\n"; - for (auto const &instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), - instruction.second); - } - out_stream_ << "---------------------------" - << "\n\n"; + for (auto const& instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; - return llvm::PreservedAnalyses::all(); -} + return llvm::PreservedAnalyses::all(); + } -bool COpsCounterPrinter::isRequired() -{ - return true; -} + bool OpsCounterPrinter::isRequired() + { + return true; + } -llvm::AnalysisKey COpsCounterAnalytics::Key; + llvm::AnalysisKey OpsCounterAnalytics::Key; -} // namespace Quantum -} // namespace Microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/OpsCounter/OpsCounter.hpp b/src/Passes/libs/OpsCounter/OpsCounter.hpp index 4978b10725..09f73362f1 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.hpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.hpp @@ -4,63 +4,66 @@ #include "Llvm.hpp" -namespace Microsoft { -namespace Quantum { - -class COpsCounterAnalytics : public llvm::AnalysisInfoMixin +namespace microsoft +{ +namespace quantum { -public: - using Result = llvm::StringMap; - /// Constructors and destructors - /// @{ - COpsCounterAnalytics() = default; - COpsCounterAnalytics(COpsCounterAnalytics const &) = delete; - COpsCounterAnalytics(COpsCounterAnalytics &&) = default; - ~COpsCounterAnalytics() = default; - /// @} + 
class OpsCounterAnalytics : public llvm::AnalysisInfoMixin + { + public: + using Result = llvm::StringMap; - /// Operators - /// @{ - COpsCounterAnalytics &operator=(COpsCounterAnalytics const &) = delete; - COpsCounterAnalytics &operator=(COpsCounterAnalytics &&) = delete; - /// @} + /// Constructors and destructors + /// @{ + OpsCounterAnalytics() = default; + OpsCounterAnalytics(OpsCounterAnalytics const&) = delete; + OpsCounterAnalytics(OpsCounterAnalytics&&) = default; + ~OpsCounterAnalytics() = default; + /// @} - /// Functions required by LLVM - /// @{ - Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); - /// @} -private: - static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; -}; + /// Operators + /// @{ + OpsCounterAnalytics& operator=(OpsCounterAnalytics const&) = delete; + OpsCounterAnalytics& operator=(OpsCounterAnalytics&&) = delete; + /// @} -class COpsCounterPrinter : public llvm::PassInfoMixin -{ -public: - /// Constructors and destructors - /// @{ - explicit COpsCounterPrinter(llvm::raw_ostream &out_stream); - COpsCounterPrinter() = delete; - COpsCounterPrinter(COpsCounterPrinter const &) = delete; - COpsCounterPrinter(COpsCounterPrinter &&) = default; - ~COpsCounterPrinter() = default; - /// @} + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} + + private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; + }; + + class OpsCounterPrinter : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + explicit OpsCounterPrinter(llvm::raw_ostream& out_stream); + OpsCounterPrinter() = delete; + OpsCounterPrinter(OpsCounterPrinter const&) = delete; + OpsCounterPrinter(OpsCounterPrinter&&) = default; + ~OpsCounterPrinter() = default; + /// @} - /// Operators - /// @{ - COpsCounterPrinter &operator=(COpsCounterPrinter const &) = delete; - COpsCounterPrinter 
&operator=(COpsCounterPrinter &&) = delete; - /// @} + /// Operators + /// @{ + OpsCounterPrinter& operator=(OpsCounterPrinter const&) = delete; + OpsCounterPrinter& operator=(OpsCounterPrinter&&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} -private: - llvm::raw_ostream &out_stream_; -}; + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + private: + llvm::raw_ostream& out_stream_; + }; -} // namespace Quantum -} // namespace Microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/site-packages/TasksCI/linting.py b/src/Passes/site-packages/TasksCI/linting.py index 09b3647df4..fc672d8b20 100644 --- a/src/Passes/site-packages/TasksCI/linting.py +++ b/src/Passes/site-packages/TasksCI/linting.py @@ -50,7 +50,7 @@ def run_clang_tidy(build_dir, filename, fix_issues: bool = False): cmd = [clang_tidy_binary] output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) - cmd.append('-header-filter=".*(QsPasses)\\/(src).*\\.hpp$"') + cmd.append('-header-filter=".*\\/(Passes)\\/(libs)\\/.*"') cmd.append('-p=' + build_dir) cmd.append('-export-fixes={}'.format(output_file)) cmd.append('--use-color') @@ -60,19 +60,23 @@ def run_clang_tidy(build_dir, filename, fix_issues: bool = False): cmd.append(filename) + logger.info("Running '{}'".format(" ".join(cmd))) + # Getting the output p = subprocess.Popen( - cmd, + " ".join(cmd), stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, - cwd=PROJECT_ROOT) + cwd=PROJECT_ROOT, + shell=True) output, err = p.communicate() + output = output.decode() + err = err.decode() + if p.returncode != 0: - output = output.decode() - err = err.decode() # The return value is negative even if the user code is without # errors, so 
we check whether there are any errors specified in @@ -96,7 +100,7 @@ def main_cpp(fix_issues: bool): logger.info("Linting") build_dir = os.path.join(PROJECT_ROOT, "Debug") - source_dir = os.path.join(PROJECT_ROOT, "src") + source_dir = os.path.join(PROJECT_ROOT, "libs") generator = None extensions = ["cpp"] @@ -112,7 +116,6 @@ def main_cpp(fix_issues: bool): files_to_analyse = [] for root, dirs, files in os.walk(source_dir): - for filename in files: if "." not in filename: continue diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl index 6ed3455886..3986121e81 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl @@ -9,9 +9,9 @@ #include namespace { -llvm::PassPluginLibraryInfo Get{name}PluginInfo() +llvm::PassPluginLibraryInfo get{name}PluginInfo() { - using namespace Microsoft::Quantum; + using namespace microsoft::quantum; using namespace llvm; return { @@ -21,7 +21,7 @@ llvm::PassPluginLibraryInfo Get{name}PluginInfo() ArrayRef /*unused*/) { if (name == "{operation_name}") { - fpm.addPass(C{name}Pass()); + fpm.addPass({name}Pass()); return true; } @@ -29,10 +29,10 @@ llvm::PassPluginLibraryInfo Get{name}PluginInfo() }); }}; } -} +} // namespace // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return Get{name}PluginInfo(); + return get{name}PluginInfo(); } diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl index c76a6ef22a..27804b5c1c 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl @@ -8,11 +8,11 @@ #include #include -namespace Microsoft +namespace 
microsoft { -namespace Quantum +namespace quantum { -llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &function, llvm::FunctionAnalysisManager &/*fam*/) +llvm::PreservedAnalyses {name}Pass::run(llvm::Function &function, llvm::FunctionAnalysisManager &/*fam*/) { // Pass body @@ -21,11 +21,11 @@ llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &function, llvm::Functio return llvm::PreservedAnalyses::all(); } -bool C{name}Pass::isRequired() +bool {name}Pass::isRequired() { return true; } -} // namespace Quantum -} // namespace Microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl index d413f43e24..24e5beaa7b 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl @@ -4,26 +4,26 @@ #include "Llvm.hpp" -namespace Microsoft +namespace microsoft { -namespace Quantum +namespace quantum { -class C{name}Pass : public llvm::PassInfoMixin +class {name}Pass : public llvm::PassInfoMixin<{name}Pass> { public: /// Constructors and destructors /// @{ - C{name}Pass() = default; - C{name}Pass(C{name}Pass const &) = default; - C{name}Pass(C{name}Pass &&) = default; - ~C{name}Pass() = default; + {name}Pass() = default; + {name}Pass({name}Pass const &) = default; + {name}Pass({name}Pass &&) = default; + ~{name}Pass() = default; /// @} /// Operators /// @{ - C{name}Pass &operator=(C{name}Pass const &) = default; - C{name}Pass &operator=(C{name}Pass &&) = default; + {name}Pass &operator=({name}Pass const &) = default; + {name}Pass &operator=({name}Pass &&) = default; /// @} /// Functions required by LLVM @@ -33,5 +33,5 @@ public: /// @} }; -} // namespace Quantum -} // namespace Microsoft +} // namespace quantum +} // namespace microsoft From 6c3c896c6ccdce57a1755fa53f6378fa92c3fa5b Mon Sep 17 00:00:00 2001 
From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 09:43:02 +0200 Subject: [PATCH 028/106] Updating code to meet PR comments --- src/Passes/.clang-tidy | 75 +++++++++++++------ src/Passes/CMakeLists.txt | 7 +- src/Passes/Makefile | 3 + src/Passes/README.md | 27 ++++--- .../examples/ClassicalIrCommandline/Makefile | 3 - .../examples/OptimisationUsingOpt/README.md | 2 - src/Passes/manage | 2 + 7 files changed, 76 insertions(+), 43 deletions(-) diff --git a/src/Passes/.clang-tidy b/src/Passes/.clang-tidy index dfec20a924..a0f64b8d21 100644 --- a/src/Passes/.clang-tidy +++ b/src/Passes/.clang-tidy @@ -1,6 +1,7 @@ Checks: "-*,bugprone-*,\ -readability-*,\ readability-identifier-*,\ +readability-redundant-member-init,\ readability-braces-around-statements,\ cert-dcl*,\ cert-env*,\ @@ -16,6 +17,22 @@ google-runtime-operator,\ hicpp-exception-baseclass,\ hicpp-explicit-conversions,\ hicpp-use-*,\ +modernize-avoid-bind,\ +modernize-loop-convert,\ +modernize-make-shared,\ +modernize-make-unique,\ +modernize-redundant-void-arg,\ +modernize-replace-random-shuffle,\ +modernize-shrink-to-fit,\ +modernize-use-bool-literals,\ +modernize-use-default-member-init,\ +modernize-use-emplace,\ +modernize-use-equals-default,\ +modernize-use-equals-delete,\ +modernize-use-noexcept,\ +modernize-use-nullptr,\ +modernize-use-override,\ +modernize-use-transparent-functors,\ misc-*,\ -misc-misplaced-widening-cast,\ performance-*" @@ -24,14 +41,22 @@ WarningsAsErrors: '*' HeaderFilterRegex: '.*' CheckOptions: - - key: readability-identifier-naming.ClassCase - value: 'CamelCase' + # Configuration documentation: https://clang.llvm.org/extra/clang-tidy/checks/readability-identifier-naming.html + # Namespaces + - key: readability-identifier-naming.NamespaceCase + value: 'lower_case' + + # Classes and structs - key: readability-identifier-naming.AbstractClassPrefix value: 'I' + - key: readability-identifier-naming.ClassCase + value: 'CamelCase' - key: readability-identifier-naming.StructCase value: 
'CamelCase' - - key: readability-identifier-naming.ParameterCase - value: 'lower_case' + - key: readability-identifier-naming.UnionCase + value: 'CamelCase' + + # Class members - key: readability-identifier-naming.PrivateMemberCase value: 'lower_case' - key: readability-identifier-naming.PrivateMemberSuffix @@ -40,37 +65,39 @@ CheckOptions: value: 'lower_case' - key: readability-identifier-naming.ProtectedMemberSuffix value: '_' - - key: readability-identifier-naming.VariableCase - value: 'lower_case' - - key: readability-identifier-naming.LocalVariableCase - value: 'lower_case' + + # Alias - key: readability-identifier-naming.TypeAliasCase value: 'CamelCase' - - key: readability-identifier-naming.UnionCase + - key: readability-identifier-naming.TypedefCase value: 'CamelCase' + + # Functions - key: readability-identifier-naming.FunctionCase value: 'camelBack' - - key: readability-identifier-naming.NamespaceCase + - key: readability-identifier-naming.IgnoreMainLikeFunctions + value: true + + # Variables and parameters + - key: readability-identifier-naming.VariableCase + value: 'lower_case' + - key: readability-identifier-naming.LocalVariableCase + value: 'lower_case' + - key: readability-identifier-naming.ParameterCase value: 'lower_case' + + # Globals, consts and enums - key: readability-identifier-naming.GlobalConstantCase value: 'UPPER_CASE' - - key: readability-identifier-naming.EnumCase - value: 'CamelCase' - - key: readability-identifier-naming.EnumConstantCase - value: 'CamelCase' - key: readability-identifier-naming.GlobalConstantPrefix value: 'G_' - key: readability-identifier-naming.ConstantCase value: 'UPPER_CASE' - - key: readability-identifier-naming.MacroDefinitionCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.TypeAliasCase + - key: readability-identifier-naming.EnumCase value: 'CamelCase' - - key: readability-identifier-naming.TypedefCase + - key: readability-identifier-naming.EnumConstantCase value: 'CamelCase' - - key: 
readability-identifier-naming.IgnoreMainLikeFunctions - value: true - - key: readability-identifier-naming.StaticVariableCase - value: 'lower_case' - - key: readability-identifier-naming.StaticVariablePrefix - value: 'h_' + + # Macros + - key: readability-identifier-naming.MacroDefinitionCase + value: 'UPPER_CASE' diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index 49fb66942b..0a55f495dd 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -9,9 +9,10 @@ message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") # Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict the ourselves to -# C++14 as this is the standard currently used by LLVM. While -# there is a very small change that the difference in standard +# Rather than allowing C++17, we restrict ourselves to +# C++14 as this is the standard currently used by the LLVM +# project for compilation of the framework. While there is +# a very small chance that the difference in standard # would break things, it is a possibility nonetheless. set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) diff --git a/src/Passes/Makefile b/src/Passes/Makefile index e039211bb9..919e20dc8a 100644 --- a/src/Passes/Makefile +++ b/src/Passes/Makefile @@ -1,3 +1,6 @@ +nothing: + @echo "Preventing the user from accidently running the clean command." + clean: rm -rf Release/ rm -rf Debug/ diff --git a/src/Passes/README.md b/src/Passes/README.md index ade1b0c9f4..e8107da1e8 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -1,13 +1,13 @@ # Q# Passes for LLVM -This library defines LLVM passes used for analysing, optimising and transforming the IR. The Q# pass library is a dynamic library that can be compiled and ran separately from the -rest of the project code. 
While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the QIR standard. +This library defines [LLVM passes](https://llvm.org/docs/Passes.html) used for analysing, optimising and transforming the IR. The Q# pass library is a dynamic library that can be compiled and ran separately from the +rest of the project code. While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the [QIR specification](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR). -## What does LLVM passes do? +## What do LLVM passes do? -Before getting started, we here provide a few examples of classical use cases for LLVM passes. +Before getting started, we here provide a few examples of classical use cases for [LLVM passes](https://llvm.org/docs/Passes.html). You find additional [instructive examples here][1]. -**Example 1: Transformation**. As a first example of what LLVM passes can do, we look at optimisation. Consider a compiler which +**Example 1: Transformation**. As a first example of what [LLVM passes](https://llvm.org/docs/Passes.html) can do, we look at optimisation. Consider a compiler which compiles ```c @@ -37,7 +37,7 @@ double test(double x) { } ``` -One purpose of LLVM passes is to allow automatic transformation from the above IR to the IR: +One purpose of [LLVM passes](https://llvm.org/docs/Passes.html) is to allow automatic transformation from the above IR to the IR: ``` define double @test(double %x) { @@ -138,12 +138,13 @@ call 2 --------------------------- ``` -**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on static arrays [2]. +**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on [static arrays][2]. 
Note that this is a non-standard usecase as such analysis is usually made using the AST rather than at the IR level. **References** -[1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass -[2] https://github.com/victor-fdez/llvm-array-check-pass + +- [1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass +- [2] https://github.com/victor-fdez/llvm-array-check-pass ## Out-of-source Pass @@ -184,12 +185,16 @@ and then make your target make [target] ``` +Valid targets are the name of the folders in `libs/` found in the passes root. + ## Running a pass -You can run a pass using `opt` as follows: +You can run a pass using [opt](https://llvm.org/docs/CommandGuide/opt.html) as follows: ```sh -opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc +cd examples/ClassicalIrCommandline +make emit-llvm-bc +opt -load-pass-plugin ../../{Debug,Release}/libOpsCounter.{dylib,so} --passes="print" -disable-output classical-program.bc ``` For a gentle introduction, see examples. diff --git a/src/Passes/examples/ClassicalIrCommandline/Makefile b/src/Passes/examples/ClassicalIrCommandline/Makefile index 2f39e8c4a4..64a96c1266 100644 --- a/src/Passes/examples/ClassicalIrCommandline/Makefile +++ b/src/Passes/examples/ClassicalIrCommandline/Makefile @@ -1,9 +1,6 @@ emit-llvm-cpp: clang -O3 -S -std=c++17 -emit-llvm classical-program.cpp -o classical-program.ll -emit-llvm-cpp-bin: - clang++ -O3 -std=c++17 -stdlib=libc++ classical-program.cpp -o a.out - emit-llvm: clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll diff --git a/src/Passes/examples/OptimisationUsingOpt/README.md b/src/Passes/examples/OptimisationUsingOpt/README.md index 741c555ea3..7f84f1b2d5 100644 --- a/src/Passes/examples/OptimisationUsingOpt/README.md +++ b/src/Passes/examples/OptimisationUsingOpt/README.md @@ -54,5 +54,3 @@ entry: ``` plus a few extra delcarations. 
- -## Applying a pass diff --git a/src/Passes/manage b/src/Passes/manage index d49d9f2b6f..4da1f9bd64 100755 --- a/src/Passes/manage +++ b/src/Passes/manage @@ -9,4 +9,6 @@ sys.path.insert(0, os.path.join(ROOT, "site-packages")) # Loading the CLI tool and running it from TasksCI.cli import cli # noqa: E402 + +# Running the CLI tool defined in site-packages/TasksCI/cli cli() From df3e4d2f162063584a7c212a9da6199778ebad7e Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 10:19:31 +0200 Subject: [PATCH 029/106] Adding function analysis template --- .../FunctionAnalysis/Lib{name}.cpp.tpl | 47 ++++++++++++++ .../FunctionAnalysis/SPECIFICATION.md | 0 .../templates/FunctionAnalysis/{name}.cpp.tpl | 41 ++++++++++++ .../templates/FunctionAnalysis/{name}.hpp.tpl | 64 +++++++++++++++++++ 4 files changed, 152 insertions(+) create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/SPECIFICATION.md create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl new file mode 100644 index 0000000000..92121f04bf --- /dev/null +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm.hpp" +#include "{name}/{name}.hpp" + +#include +#include + +namespace { +// Interface to plugin +llvm::PassPluginLibraryInfo get{name}PluginInfo() +{ + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the printer + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "{operation-name}") + { + fpm.addPass({name}Pass()); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass({name}Pass()); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + fam.registerPass([] { return {name}Analytics(); }); + }); + }}; +} + +} // namespace + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return get{name}PluginInfo(); +} diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/SPECIFICATION.md b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/SPECIFICATION.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl new file mode 100644 index 0000000000..3345c92eb1 --- /dev/null +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "{name}/{name}.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +{name}Analytics::Result {name}Analytics::run(llvm::Function &/*function*/, + llvm::FunctionAnalysisManager & /*unused*/) +{ + {name}Analytics::Result result; + + // Collect analytics here + + return result; +} + +llvm::PreservedAnalyses {name}Pass::run(llvm::Function & /*function*/, + llvm::FunctionAnalysisManager & /*fam*/) +{ + // auto &results = fam.getResult<{name}Analytics>(function); + + // Use analytics here + + return llvm::PreservedAnalyses::all(); +} + +bool {name}Pass::isRequired() +{ + return true; +} + +llvm::AnalysisKey {name}Analytics::Key; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl new file mode 100644 index 0000000000..9c13246f07 --- /dev/null +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl @@ -0,0 +1,64 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm.hpp" + +namespace microsoft { +namespace quantum { + +class {name}Analytics : public llvm::AnalysisInfoMixin<{name}Analytics> +{ +public: + using Result = llvm::StringMap; ///< Change the type of the collected date here + + /// Constructors and destructors + /// @{ + {name}Analytics() = default; + {name}Analytics({name}Analytics const &) = delete; + {name}Analytics({name}Analytics &&) = default; + ~{name}Analytics() = default; + /// @} + + /// Operators + /// @{ + {name}Analytics &operator=({name}Analytics const &) = delete; + {name}Analytics &operator=({name}Analytics &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function & function, llvm::FunctionAnalysisManager & /*unused*/); + /// @} + +private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin<{name}Analytics>; +}; + +class {name}Pass : public llvm::PassInfoMixin<{name}Pass> +{ +public: + /// Constructors and destructors + /// @{ + {name}Pass() = default; + {name}Pass({name}Pass const &) = delete; + {name}Pass({name}Pass &&) = default; + ~{name}Pass() = default; + /// @} + + /// Operators + /// @{ + {name}Pass &operator=({name}Pass const &) = delete; + {name}Pass &operator=({name}Pass &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function & function, llvm::FunctionAnalysisManager & fam); + static bool isRequired(); + /// @} +}; + +} // namespace quantum +} // namespace microsoft From 62e64cb892761ce613fee0cbbb800783a2732d43 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 26 Jul 2021 10:30:02 +0200 Subject: [PATCH 030/106] Update of template --- .../FunctionAnalysis/Lib{name}.cpp.tpl | 8 ++++---- .../templates/FunctionAnalysis/{name}.cpp.tpl | 11 +++++++++-- .../templates/FunctionAnalysis/{name}.hpp.tpl | 17 ++++++++++------- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl index 92121f04bf..1f55809910 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl @@ -16,12 +16,12 @@ llvm::PassPluginLibraryInfo get{name}PluginInfo() return { LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the printer + // Registering a printer for the anaylsis pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { - if (name == "{operation-name}") + if (name == "print<{operation-name}>") { - fpm.addPass({name}Pass()); + fpm.addPass({name}Printer(llvm::errs())); return true; } return false; @@ -29,7 +29,7 @@ llvm::PassPluginLibraryInfo get{name}PluginInfo() pb.registerVectorizerStartEPCallback( [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass({name}Pass()); + fpm.addPass({name}Printer(llvm::errs())); }); // Registering the analysis module diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl index 3345c92eb1..1d0eea61b4 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl @@ -20,17 +20,24 @@ namespace quantum { return result; } -llvm::PreservedAnalyses {name}Pass::run(llvm::Function & 
/*function*/, + +{name}Printer::{name}Printer(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) +{ +} + +llvm::PreservedAnalyses {name}Printer::run(llvm::Function & /*function*/, llvm::FunctionAnalysisManager & /*fam*/) { // auto &results = fam.getResult<{name}Analytics>(function); // Use analytics here + out_stream_ << "Analysis results are printed using this stream\n"; return llvm::PreservedAnalyses::all(); } -bool {name}Pass::isRequired() +bool {name}Printer::isRequired() { return true; } diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl index 9c13246f07..5dd8e664d9 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl @@ -36,21 +36,22 @@ private: friend struct llvm::AnalysisInfoMixin<{name}Analytics>; }; -class {name}Pass : public llvm::PassInfoMixin<{name}Pass> +class {name}Printer : public llvm::PassInfoMixin<{name}Printer> { public: /// Constructors and destructors /// @{ - {name}Pass() = default; - {name}Pass({name}Pass const &) = delete; - {name}Pass({name}Pass &&) = default; - ~{name}Pass() = default; + explicit {name}Printer(llvm::raw_ostream& out_stream); + {name}Printer() = delete; + {name}Printer({name}Printer const &) = delete; + {name}Printer({name}Printer &&) = default; + ~{name}Printer() = default; /// @} /// Operators /// @{ - {name}Pass &operator=({name}Pass const &) = delete; - {name}Pass &operator=({name}Pass &&) = delete; + {name}Printer &operator=({name}Printer const &) = delete; + {name}Printer &operator=({name}Printer &&) = delete; /// @} /// Functions required by LLVM @@ -58,6 +59,8 @@ public: llvm::PreservedAnalyses run(llvm::Function & function, llvm::FunctionAnalysisManager & fam); static bool isRequired(); /// @} +private: + llvm::raw_ostream& out_stream_; }; } // namespace quantum From 
f0c47f2adc4d489b58a5f76ee6de4640009458e9 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 10:31:42 +0200 Subject: [PATCH 031/106] Adding boiler plate for const size array analysis --- .../ConstSizeArrayAnalysis.cpp | 48 +++++++++++++ .../ConstSizeArrayAnalysis.hpp | 67 +++++++++++++++++++ .../LibConstSizeArrayAnalysis.cpp | 47 +++++++++++++ .../ConstSizeArrayAnalysis/SPECIFICATION.md | 0 4 files changed, 162 insertions(+) create mode 100644 src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp create mode 100644 src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp create mode 100644 src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp create mode 100644 src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp new file mode 100644 index 0000000000..1859544c02 --- /dev/null +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run(llvm::Function &/*function*/, + llvm::FunctionAnalysisManager & /*unused*/) +{ + ConstSizeArrayAnalysisAnalytics::Result result; + + // Collect analytics here + + return result; +} + + +ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) +{ +} + +llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & /*function*/, + llvm::FunctionAnalysisManager & /*fam*/) +{ + // auto &results = fam.getResult(function); + + // Use analytics here + out_stream_ << "Analysis results are printed using this stream\n"; + + return llvm::PreservedAnalyses::all(); +} + +bool ConstSizeArrayAnalysisPrinter::isRequired() +{ + return true; +} + +llvm::AnalysisKey ConstSizeArrayAnalysisAnalytics::Key; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp new file mode 100644 index 0000000000..f063161039 --- /dev/null +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp @@ -0,0 +1,67 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm.hpp" + +namespace microsoft { +namespace quantum { + +class ConstSizeArrayAnalysisAnalytics : public llvm::AnalysisInfoMixin +{ +public: + using Result = llvm::StringMap; ///< Change the type of the collected date here + + /// Constructors and destructors + /// @{ + ConstSizeArrayAnalysisAnalytics() = default; + ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics const &) = delete; + ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics &&) = default; + ~ConstSizeArrayAnalysisAnalytics() = default; + /// @} + + /// Operators + /// @{ + ConstSizeArrayAnalysisAnalytics &operator=(ConstSizeArrayAnalysisAnalytics const &) = delete; + ConstSizeArrayAnalysisAnalytics &operator=(ConstSizeArrayAnalysisAnalytics &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function & function, llvm::FunctionAnalysisManager & /*unused*/); + /// @} + +private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; +}; + +class ConstSizeArrayAnalysisPrinter : public llvm::PassInfoMixin +{ +public: + /// Constructors and destructors + /// @{ + explicit ConstSizeArrayAnalysisPrinter(llvm::raw_ostream& out_stream); + ConstSizeArrayAnalysisPrinter() = delete; + ConstSizeArrayAnalysisPrinter(ConstSizeArrayAnalysisPrinter const &) = delete; + ConstSizeArrayAnalysisPrinter(ConstSizeArrayAnalysisPrinter &&) = default; + ~ConstSizeArrayAnalysisPrinter() = default; + /// @} + + /// Operators + /// @{ + ConstSizeArrayAnalysisPrinter &operator=(ConstSizeArrayAnalysisPrinter const &) = delete; + ConstSizeArrayAnalysisPrinter &operator=(ConstSizeArrayAnalysisPrinter &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function & function, llvm::FunctionAnalysisManager & fam); + static bool isRequired(); + /// @} +private: + llvm::raw_ostream& out_stream_; +}; + +} // namespace quantum +} // namespace microsoft diff --git 
a/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp b/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp new file mode 100644 index 0000000000..c77715b8c3 --- /dev/null +++ b/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm.hpp" +#include "ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp" + +#include +#include + +namespace { +// Interface to plugin +llvm::PassPluginLibraryInfo getConstSizeArrayAnalysisPluginInfo() +{ + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "ConstSizeArrayAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering a printer for the anaylsis + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "print<{operation-name}>") + { + fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + fam.registerPass([] { return ConstSizeArrayAnalysisAnalytics(); }); + }); + }}; +} + +} // namespace + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return getConstSizeArrayAnalysisPluginInfo(); +} diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md b/src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md new file mode 100644 index 0000000000..e69de29bb2 From 49fe60156df3de81e1a63ccb2cb94b3e2a1e443c Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 26 Jul 2021 12:16:45 +0200 Subject: [PATCH 032/106] Adding constant allocation identification --- .../examples/ClassicalIrCommandline/README.md | 2 +- .../ConstSizeArray/ConstSizeArray.csproj | 9 + .../ConstSizeArray/ConstSizeArray.qs | 16 + .../ConstSizeArray/Makefile | 6 + .../ConstSizeArray/qir/ConstSizeArray-O3.ll | 50 + .../ConstSizeArray/qir/ConstSizeArray.ll | 2039 +++++++++++++++++ .../examples/ConstSizeArrayAnalysis/Makefile | 5 + .../examples/ConstSizeArrayAnalysis/README.md | 74 + .../analysis-problem.ll | 50 + .../ConstSizeArrayAnalysis.cpp | 79 +- .../ConstSizeArrayAnalysis.hpp | 29 +- .../LibConstSizeArrayAnalysis.cpp | 2 +- .../FunctionAnalysis/Lib{name}.cpp.tpl | 2 +- 13 files changed, 2335 insertions(+), 28 deletions(-) create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/Makefile create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/README.md create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll diff --git a/src/Passes/examples/ClassicalIrCommandline/README.md b/src/Passes/examples/ClassicalIrCommandline/README.md index b293fc6b5c..1227fdbe70 100644 --- a/src/Passes/examples/ClassicalIrCommandline/README.md +++ b/src/Passes/examples/ClassicalIrCommandline/README.md @@ -32,5 +32,5 @@ opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-cou This part assumes that you have build the QsPasses library. 
```sh -opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc +opt -load-pass-plugin ../../{Debug,Release}/libs/libQSharpPasses.{dylib,so} --passes="print" -disable-output classical-program.bc ``` diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj new file mode 100644 index 0000000000..eeab572589 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj @@ -0,0 +1,9 @@ + + + + Exe + netcoreapp3.1 + true + + + diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs new file mode 100644 index 0000000000..7d46c1a7af --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +namespace Example { + @EntryPoint() + operation Main() : Int + { + return QuantumFunction(10); + } + + operation QuantumFunction(nQubits : Int) : Int { + use qubits = Qubit[nQubits]; + + return 0; + } +} diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile new file mode 100644 index 0000000000..db141e9e19 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile @@ -0,0 +1,6 @@ + +clean: + rm -rf bin + rm -rf obj + rm -rf qir + \ No newline at end of file diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll new file mode 100644 index 0000000000..01315ee268 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll @@ -0,0 +1,50 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumFunction__body() + ret void +} + +define internal fastcc void @Example__QuantumFunction__body() unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 10) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() 
local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll new file mode 100644 index 0000000000..9279113efd --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll @@ -0,0 +1,2039 @@ + +%Range = type { i64, i64, i64 } +%Tuple = type opaque +%Array = type opaque +%Qubit = type opaque +%String = type opaque +%Callable = type opaque +%Result = type opaque + +@PauliI = internal constant i2 0 +@PauliX = internal constant i2 1 +@PauliY = internal constant i2 -1 +@PauliZ = internal constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } +@PartialApplication__1 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__1 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__3 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] +@MemoryManagement__2 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] +@PartialApplication__4 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] +@PartialApplication__5 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] +@PartialApplication__6 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__6__ctladj__wrapper] +@PartialApplication__7 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__8 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__9 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] +@PartialApplication__10 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@PartialApplication__11 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] +@PartialApplication__12 = internal constant [4 x void 
(%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] + +define internal i64 @Example__Main__body() { +entry: + %0 = call i64 @Example__QuantumFunction__body(i64 10) + ret i64 %0 +} + +define internal i64 @Example__QuantumFunction__body(i64 %nQubits) { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret i64 0 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +define internal { %String* }* @Microsoft__Quantum__Diagnostics__EnableTestingViaName__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %__Item1__, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) + ret { %String* }* %1 +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +define internal { %String* }* @Microsoft__Quantum__Diagnostics__Test__body(%String* %ExecutionTarget) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to 
i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %ExecutionTarget, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %ExecutionTarget, i32 1) + ret { %String* }* %1 +} + +define internal %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +entry: + ret %Tuple* null +} + +define internal { %String* }* @Microsoft__Quantum__Core__Deprecated__body(%String* %NewName) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %NewName, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %NewName, i32 1) + ret { %String* }* %1 +} + +define internal %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +entry: + ret %Tuple* null +} + +define internal %Tuple* @Microsoft__Quantum__Core__Inline__body() { +entry: + ret %Tuple* null +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* 
%onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare void @__quantum__qis__applyconditionallyintrinsic__body(%Array*, %Array*, %Callable*, %Callable*) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + 
%measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void 
@__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %10) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, 
%Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 
+ call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** 
%16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 
1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast 
%Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + 
%7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { 
%Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load 
%Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctladj(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %14) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) + 
call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call 
%Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* 
%13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* 
}* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call 
%Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* 
%9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw 
(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* 
%measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +declare void @__quantum__qis__applyifelseintrinsic__body(%Result*, %Callable*, %Callable*) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + 
call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* 
%onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 
1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 
= bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { 
%Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast 
%Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { 
%Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %onResultOneOp__1 = 
call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds 
{ %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { 
%Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** 
getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, 
%Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load 
%Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctladj(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__callable_make_controlled(%Callable* 
%8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, 
%Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 
1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* 
%8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* 
}, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** 
%16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal { %String*, %String* }* @Microsoft__Quantum__Targeting__RequiresCapability__body(%String* %Level, %String* %Reason) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %1 = bitcast %Tuple* %0 to { %String*, %String* }* + %2 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 1 + store %String* %Level, %String** %2, align 8 + store %String* %Reason, %String** %3, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %Level, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %Reason, i32 1) + ret { %String*, %String* }* %1 +} + +define internal { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %__Item1__, %String** %2, align 8 + call void 
@__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) + ret { %String* }* %1 +} + +define i64 @Example__Main__Interop() #0 { +entry: + %0 = call i64 @Example__Main__body() + ret i64 %0 +} + +define void @Example__Main() #1 { +entry: + %0 = call i64 @Example__Main__body() + %1 = call %String* @__quantum__rt__int_to_string(i64 %0) + call void @__quantum__rt__message(%String* %1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) + +declare %String* @__quantum__rt__int_to_string(i64) + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile new file mode 100644 index 0000000000..fa5dd88fb0 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile @@ -0,0 +1,5 @@ +run: build + opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll + +build: + pushd ../../ && mkdir -p Debug && cd Debug && cmake .. 
&& make ConstSizeArrayAnalysis && popd || popd diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/README.md b/src/Passes/examples/ConstSizeArrayAnalysis/README.md new file mode 100644 index 0000000000..3d662c5b9f --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/README.md @@ -0,0 +1,74 @@ +# ConstSizeArray + +## Running the analysis + +Ensure that you have build the latest version of the pass + +```sh +% make build +``` + +```sh +% opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll +``` + +## Generating an example QIR + +Build the QIR + +```sh +cd ConstSizeArray +dotnet build ConstSizeArray.csproj +``` + +Strip it of unecessary information + +```sh +opt -S qir/ConstSizeArray.ll -O1 > qir/Problem.ll +``` + +Result should be similar to + +``` +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + %qubits.i.i = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 10) + tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 1) + tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 -1) + tail call void @__quantum__rt__qubit_release_array(%Array* %qubits.i.i) + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + %qubits.i.i = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 10) + tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 1) + tail call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 -1) + tail call void @__quantum__rt__qubit_release_array(%Array* %qubits.i.i) + %0 = tail call %String* @__quantum__rt__int_to_string(i64 0) + tail call void @__quantum__rt__message(%String* %0) + tail call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } +``` diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll new file mode 100644 index 0000000000..01315ee268 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll @@ -0,0 +1,50 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumFunction__body() + ret void +} + +define internal fastcc void @Example__QuantumFunction__body() unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 10) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + 
ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp index 1859544c02..37c455961b 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp @@ -10,29 +10,86 @@ namespace microsoft { namespace quantum { -ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run(llvm::Function &/*function*/, - llvm::FunctionAnalysisManager & /*unused*/) +ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run( + llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) { ConstSizeArrayAnalysisAnalytics::Result result; // Collect analytics here + // Use analytics here + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) + { + // Skipping debug code + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + + // Checking if it is a call instruction + auto *call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + continue; + } + + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + + // TODO(tfr): Find a better way to inject runtime symbols + if (name != "__quantum__rt__qubit_allocate_array") + { + continue; + } + + // Validating that there exactly one argument + if (call_instr->arg_size() != 1) + { + continue; + } + + // 
Getting the size of the argument + auto size_value = call_instr->getArgOperand(0); + if (size_value == nullptr) + { + continue; + } + + // Checking if the value is constant + auto cst = llvm::dyn_cast(size_value); + if (cst == nullptr) + { + continue; + } + + result[name] = cst->getValue().getSExtValue(); + } + } + return result; } - -ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream& out_stream) +ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream &out_stream) : out_stream_(out_stream) -{ -} +{} -llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & /*function*/, - llvm::FunctionAnalysisManager & /*fam*/) +llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) { - // auto &results = fam.getResult(function); + auto &results = fam.getResult(function); - // Use analytics here - out_stream_ << "Analysis results are printed using this stream\n"; + if (!results.empty()) + { + out_stream_ << function.getName() << "\n"; + out_stream_ << "====================" + << "\n\n"; + for (auto const &size_info : results) + { + out_stream_ << size_info.first() << ": " << size_info.second << "\n"; + } + } return llvm::PreservedAnalyses::all(); } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp index f063161039..b30abcf9b9 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp @@ -7,17 +7,18 @@ namespace microsoft { namespace quantum { -class ConstSizeArrayAnalysisAnalytics : public llvm::AnalysisInfoMixin +class ConstSizeArrayAnalysisAnalytics + : public llvm::AnalysisInfoMixin { public: - using Result = llvm::StringMap; ///< Change the type of the collected date here + using Result = llvm::StringMap; ///< Change the type of the collected date here 
/// Constructors and destructors /// @{ - ConstSizeArrayAnalysisAnalytics() = default; - ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics const &) = delete; - ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics &&) = default; - ~ConstSizeArrayAnalysisAnalytics() = default; + ConstSizeArrayAnalysisAnalytics() = default; + ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics const &) = delete; + ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics &&) = default; + ~ConstSizeArrayAnalysisAnalytics() = default; /// @} /// Operators @@ -28,7 +29,7 @@ class ConstSizeArrayAnalysisAnalytics : public llvm::AnalysisInfoMixin /*unused*/) { - if (name == "print<{operation-name}>") + if (name == "print") { fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); return true; diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl index 1f55809910..beb4589fe1 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl @@ -19,7 +19,7 @@ llvm::PassPluginLibraryInfo get{name}PluginInfo() // Registering a printer for the anaylsis pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { - if (name == "print<{operation-name}>") + if (name == "print<{operation_name}>") { fpm.addPass({name}Printer(llvm::errs())); return true; From cc1a9791cd7d1e2e617400b4d3083489c2ecc9e4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 26 Jul 2021 12:17:52 +0200 Subject: [PATCH 033/106] Removing garbage --- .../ConstSizeArray/qir/ConstSizeArray-O3.ll | 50 - .../ConstSizeArray/qir/ConstSizeArray.ll | 2039 ----------------- 2 files changed, 2089 deletions(-) delete mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll delete mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll deleted file mode 100644 index 01315ee268..0000000000 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll +++ /dev/null @@ -1,50 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumFunction__body() - ret void -} - -define internal fastcc void @Example__QuantumFunction__body() unnamed_addr { -entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 10) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #1 { 
-entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll deleted file mode 100644 index 9279113efd..0000000000 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll +++ /dev/null @@ -1,2039 +0,0 @@ - -%Range = type { i64, i64, i64 } -%Tuple = type opaque -%Array = type opaque -%Qubit = type opaque -%String = type opaque -%Callable = type opaque -%Result = type opaque - -@PauliI = internal constant i2 0 -@PauliX = internal constant i2 1 -@PauliY = internal constant i2 -1 -@PauliZ = internal constant i2 -2 -@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -@PartialApplication__1 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@MemoryManagement__1 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] -@PartialApplication__2 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, 
%Tuple*, %Tuple*)* null] -@PartialApplication__3 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] -@MemoryManagement__2 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] -@PartialApplication__4 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] -@PartialApplication__5 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] -@PartialApplication__6 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] -@PartialApplication__7 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__8 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__9 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] -@PartialApplication__10 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] -@PartialApplication__11 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] -@PartialApplication__12 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] - -define internal i64 @Example__Main__body() { -entry: - %0 = call i64 @Example__QuantumFunction__body(i64 10) - ret i64 %0 -} - -define internal i64 @Example__QuantumFunction__body(i64 %nQubits) { -entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret i64 0 -} - -declare %Qubit* @__quantum__rt__qubit_allocate() - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) - -declare void @__quantum__rt__qubit_release_array(%Array*) - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) - -define internal { %String* }* @Microsoft__Quantum__Diagnostics__EnableTestingViaName__body(%String* %__Item1__) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %__Item1__, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) - ret { %String* }* %1 -} - -declare %Tuple* @__quantum__rt__tuple_create(i64) - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) - -define internal { %String* }* @Microsoft__Quantum__Diagnostics__Test__body(%String* %ExecutionTarget) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* 
%ExecutionTarget, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %ExecutionTarget, i32 1) - ret { %String* }* %1 -} - -define internal %Tuple* @Microsoft__Quantum__Core__Attribute__body() { -entry: - ret %Tuple* null -} - -define internal { %String* }* @Microsoft__Quantum__Core__Deprecated__body(%String* %NewName) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %NewName, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %NewName, i32 1) - ret { %String* }* %1 -} - -define internal %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { -entry: - ret %Tuple* null -} - -define internal %Tuple* @Microsoft__Quantum__Core__Inline__body() { -entry: - ret %Tuple* null -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void 
@__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) - -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) - -declare void @__quantum__qis__applyconditionallyintrinsic__body(%Array*, %Array*, %Callable*, %Callable*) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void 
@__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void 
@__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) - -declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) - -declare void @__quantum__rt__callable_make_adjoint(%Callable*) - -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, 
i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void 
@__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, 
%Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, 
%Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) - -declare void @__quantum__rt__callable_make_controlled(%Callable*) - -declare void @__quantum__rt__array_update_reference_count(%Array*, i32) - -define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 
%count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* 
%result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) - -declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 
1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast 
%Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - 
%7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void 
@__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void 
@__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { 
%Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load 
%Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void 
@Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctladj(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %14) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) - 
call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call 
%Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* 
%13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* 
}* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call 
%Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* 
%9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw 
(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* 
%measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -declare void @__quantum__qis__applyifelseintrinsic__body(%Result*, %Callable*, %Callable*) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - 
call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* 
%onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 
1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 
= bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { 
%Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast 
%Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { 
%Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %onResultOneOp__1 = 
call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds 
{ %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { 
%Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** 
getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, 
%Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load 
%Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctladj(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %8) - call void @__quantum__rt__callable_make_controlled(%Callable* 
%8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %13) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, 
%Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 
1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* 
%8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* 
}, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** 
%16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal { %String*, %String* }* @Microsoft__Quantum__Targeting__RequiresCapability__body(%String* %Level, %String* %Reason) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %1 = bitcast %Tuple* %0 to { %String*, %String* }* - %2 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 1 - store %String* %Level, %String** %2, align 8 - store %String* %Reason, %String** %3, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %Level, i32 1) - call void @__quantum__rt__string_update_reference_count(%String* %Reason, i32 1) - ret { %String*, %String* }* %1 -} - -define internal { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %__Item1__, %String** %2, align 8 - call void 
@__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) - ret { %String* }* %1 -} - -define i64 @Example__Main__Interop() #0 { -entry: - %0 = call i64 @Example__Main__body() - ret i64 %0 -} - -define void @Example__Main() #1 { -entry: - %0 = call i64 @Example__Main__body() - %1 = call %String* @__quantum__rt__int_to_string(i64 %0) - call void @__quantum__rt__message(%String* %1) - call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) - -declare %String* @__quantum__rt__int_to_string(i64) - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } From 7319eb435417b8ddceb640e9e6acf9f257a51626 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 12:33:05 +0200 Subject: [PATCH 034/106] Adding slightly harder example --- .../ConstSizeArray/ConstSizeArray.qs | 2 +- .../examples/ConstSizeArrayAnalysis/Makefile | 4 +- ...lysis-problem.ll => analysis-problem-1.ll} | 0 .../analysis-problem-2.ll | 51 +++++++++++++++++++ 4 files changed, 54 insertions(+), 3 deletions(-) rename src/Passes/examples/ConstSizeArrayAnalysis/{analysis-problem.ll => analysis-problem-1.ll} (100%) create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs index 7d46c1a7af..53b9e4cc36 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -5,7 +5,7 @@ namespace Example { @EntryPoint() operation Main() : Int { - return QuantumFunction(10); + return QuantumFunction(10) + QuantumFunction(3); } operation QuantumFunction(nQubits : Int) : Int { diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile 
index fa5dd88fb0..bf0b756b86 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile +++ b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile @@ -1,5 +1,5 @@ run: build - opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll - + opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem-1.ll + build: pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make ConstSizeArrayAnalysis && popd || popd diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-1.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll rename to src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-1.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll new file mode 100644 index 0000000000..14b286b182 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll @@ -0,0 +1,51 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumFunction__body(i64 10) + call fastcc void @Example__QuantumFunction__body(i64 3) + ret void +} + +define internal fastcc void @Example__QuantumFunction__body(i64 %nQubits) unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void 
@__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } From c051ee678b4fe2a1caea9a68c3145cbf3c8e56de Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 14:00:21 +0200 Subject: [PATCH 035/106] Updating with additional example --- .../ConstSizeArray/ConstSizeArray.qs | 8 ++++---- .../examples/ConstSizeArrayAnalysis/analysis-problem-2.ll | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs index 53b9e4cc36..e59bf41637 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -5,12 +5,12 @@ namespace Example { @EntryPoint() operation Main() : Int { - return QuantumFunction(10) + QuantumFunction(3); + QuantumFunction(3); + QuantumFunction(10) ; + return 0; } - operation QuantumFunction(nQubits : Int) : Int { + operation QuantumFunction(nQubits : Int) : Unit { use qubits = Qubit[nQubits]; - - return 0; } } diff --git 
a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll index 14b286b182..2577a9de2f 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll @@ -6,8 +6,8 @@ source_filename = "qir/ConstSizeArray.ll" define internal fastcc void @Example__Main__body() unnamed_addr { entry: - call fastcc void @Example__QuantumFunction__body(i64 10) call fastcc void @Example__QuantumFunction__body(i64 3) + call fastcc void @Example__QuantumFunction__body(i64 10) ret void } From 6f34ee8f54b27db1db707110749caeba920f7478 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 12:37:37 +0200 Subject: [PATCH 036/106] Adding static analysis for qubit allocation --- src/Passes/.clang-tidy | 26 +- .../ConstSizeArray/Comparison.cpp | 18 ++ .../ConstSizeArray/ConstSizeArray.qs | 25 +- .../ConstSizeArray/Makefile | 3 +- .../ConstSizeArray/comparison.ll | 75 +++++ .../examples/ConstSizeArrayAnalysis/Makefile | 4 +- .../examples/ConstSizeArrayAnalysis/README.md | 86 ++++++ .../analysis-problem-2.ll | 8 +- .../analysis-problem-3.ll | 52 ++++ .../analysis-problem-4.ll | 80 ++++++ .../ConstSizeArrayAnalysis.cpp | 263 +++++++++++++++--- .../ConstSizeArrayAnalysis.hpp | 44 ++- 12 files changed, 613 insertions(+), 71 deletions(-) create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll diff --git a/src/Passes/.clang-tidy b/src/Passes/.clang-tidy index a0f64b8d21..7260427b82 100644 --- a/src/Passes/.clang-tidy +++ b/src/Passes/.clang-tidy @@ -66,11 +66,23 @@ CheckOptions: - key: readability-identifier-naming.ProtectedMemberSuffix 
value: '_' - # Alias + # Type Alias and Enum Types / constants - key: readability-identifier-naming.TypeAliasCase value: 'CamelCase' - key: readability-identifier-naming.TypedefCase value: 'CamelCase' + - key: readability-identifier-naming.EnumCase + value: 'CamelCase' + - key: readability-identifier-naming.EnumConstantCase + value: 'CamelCase' + + # Globals, consts and enums + - key: readability-identifier-naming.GlobalConstantCase + value: 'UPPER_CASE' + - key: readability-identifier-naming.GlobalConstantPrefix + value: 'G_' + - key: readability-identifier-naming.ConstantCase + value: 'UPPER_CASE' # Functions - key: readability-identifier-naming.FunctionCase @@ -86,18 +98,6 @@ CheckOptions: - key: readability-identifier-naming.ParameterCase value: 'lower_case' - # Globals, consts and enums - - key: readability-identifier-naming.GlobalConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.GlobalConstantPrefix - value: 'G_' - - key: readability-identifier-naming.ConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.EnumCase - value: 'CamelCase' - - key: readability-identifier-naming.EnumConstantCase - value: 'CamelCase' - # Macros - key: readability-identifier-naming.MacroDefinitionCase value: 'UPPER_CASE' diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp new file mode 100644 index 0000000000..00f2b75f5f --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp @@ -0,0 +1,18 @@ +#include + +void QuantumFunction(int32_t nQubits) +{ + volatile uint64_t x = 3; + for (uint64_t i = 0; i < x; ++i) + { + nQubits += nQubits; + } + int32_t qubits[nQubits]; +} + +int main() +{ + QuantumFunction(10); + QuantumFunction(3); + return 0; +} \ No newline at end of file diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs 
b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs index e59bf41637..ae895648ed 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,16 +1,25 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - namespace Example { + @EntryPoint() operation Main() : Int { - QuantumFunction(3); - QuantumFunction(10) ; + QuantumProgram(3,2,1); + QuantumProgram(4,9,4); return 0; } - operation QuantumFunction(nQubits : Int) : Unit { - use qubits = Qubit[nQubits]; + function X(value: Int): Int + { + return 3 * value; + } + + operation QuantumProgram(x: Int, h: Int, g: Int) : Unit { + let z = x * (x + 1) - 47; + let y = 3 * x; + + use qubits1 = Qubit[(y - 2)/2-z]; + use qubits2 = Qubit[y - g]; + use qubits3 = Qubit[h]; + use qubits4 = Qubit[X(x)]; } -} +} \ No newline at end of file diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile index db141e9e19..fa97ab5b45 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile @@ -1,4 +1,5 @@ - +comparison: + clang++ -S -emit-llvm -std=c++14 -stdlib=libc++ Comparison.cpp -o comparison.ll clean: rm -rf bin rm -rf obj diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll new file mode 100644 index 0000000000..0e2c5308a3 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll @@ -0,0 +1,75 @@ +; ModuleID = 'Comparison.cpp' +source_filename = "Comparison.cpp" +target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx11.0.0" + +; Function Attrs: noinline 
nounwind optnone ssp uwtable mustprogress +define dso_local void @_Z15QuantumFunctioni(i32 %0) #0 { + %2 = alloca i32, align 4 + %3 = alloca i64, align 8 + %4 = alloca i64, align 8 + %5 = alloca i8*, align 8 + %6 = alloca i64, align 8 + store i32 %0, i32* %2, align 4 + store volatile i64 3, i64* %3, align 8 + store i64 0, i64* %4, align 8 + br label %7 + +7: ; preds = %15, %1 + %8 = load i64, i64* %4, align 8 + %9 = load volatile i64, i64* %3, align 8 + %10 = icmp ult i64 %8, %9 + br i1 %10, label %11, label %18 + +11: ; preds = %7 + %12 = load i32, i32* %2, align 4 + %13 = load i32, i32* %2, align 4 + %14 = add nsw i32 %13, %12 + store i32 %14, i32* %2, align 4 + br label %15 + +15: ; preds = %11 + %16 = load i64, i64* %4, align 8 + %17 = add i64 %16, 1 + store i64 %17, i64* %4, align 8 + br label %7, !llvm.loop !3 + +18: ; preds = %7 + %19 = load i32, i32* %2, align 4 + %20 = zext i32 %19 to i64 + %21 = call i8* @llvm.stacksave() + store i8* %21, i8** %5, align 8 + %22 = alloca i32, i64 %20, align 16 + store i64 %20, i64* %6, align 8 + %23 = load i8*, i8** %5, align 8 + call void @llvm.stackrestore(i8* %23) + ret void +} + +; Function Attrs: nofree nosync nounwind willreturn +declare i8* @llvm.stacksave() #1 + +; Function Attrs: nofree nosync nounwind willreturn +declare void @llvm.stackrestore(i8*) #1 + +; Function Attrs: noinline norecurse nounwind optnone ssp uwtable mustprogress +define dso_local i32 @main() #2 { + %1 = alloca i32, align 4 + store i32 0, i32* %1, align 4 + call void @_Z15QuantumFunctioni(i32 10) + call void @_Z15QuantumFunctioni(i32 3) + ret i32 0 +} + +attributes #0 = { noinline nounwind optnone ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" 
"target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nofree nosync nounwind willreturn } +attributes #2 = { noinline norecurse nounwind optnone ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } + +!llvm.module.flags = !{!0, !1} +!llvm.ident = !{!2} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{!"Homebrew clang version 12.0.1"} +!3 = distinct !{!3, !4} +!4 = !{!"llvm.loop.mustprogress"} diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile index bf0b756b86..4050079dfe 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile +++ b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile @@ -1,5 +1,7 @@ run: build - opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem-1.ll +# opt -load-pass-plugin ../../Debug/libs/libMetaData.dylib --passes="meta-data" -S analysis-problem-4.ll + opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem-4.ll build: pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make ConstSizeArrayAnalysis && popd || popd + pushd ../../ && mkdir -p Debug && cd Debug && cmake .. 
&& make MetaData && popd || popd diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/README.md b/src/Passes/examples/ConstSizeArrayAnalysis/README.md index 3d662c5b9f..fba3828ece 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/README.md +++ b/src/Passes/examples/ConstSizeArrayAnalysis/README.md @@ -12,6 +12,88 @@ Ensure that you have build the latest version of the pass % opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll ``` +## Cases to consider + +```qsharp +namespace Example { + + @EntryPoint() + operation Main() : Int + { + QuantumProgram(); + return 0; + } + + + + operation QuantumProgram(x: Int) : Unit { + use qubits = Qubit[3]; + } + +} +``` + +```qsharp +namespace Example { + + @EntryPoint() + operation Main() : Int + { + QuantumProgram(3); + QuantumProgram(4); + return 0; + } + + + + operation QuantumProgram(x: Int) : Unit { + use qubits = Qubit[x]; + } + +} +``` + +```qsharp +namespace Example { + + @EntryPoint() + operation Main() : Int + { + QuantumProgram(3); + QuantumProgram(4); + return 0; + } + + + + operation QuantumProgram(x: Int) : Unit { + use qubits = Qubit[x * x]; + } + +} +``` + +```qsharp +namespace Example { + + @EntryPoint() + operation Main() : Int + { + QuantumProgram(ComputeNumberOfQubits); + return 0; + } + + function ComputeNumberOfQubits(x: Int): Int { + return x * x; + } + + operation QuantumProgram(fnc : Int -> Int) : Unit { + use qubits = Qubit[fnc(3)]; + } + +} +``` + ## Generating an example QIR Build the QIR @@ -72,3 +154,7 @@ declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr attributes #0 = { "InteropFriendly" } attributes #1 = { "EntryPoint" } ``` + +# Notes + +To make a proper version of Const Size deduction, look at the [constant folding](https://llvm.org/doxygen/ConstantFolding_8cpp_source.html) implementation and in particular, the [target library](https://llvm.org/doxygen/classllvm_1_1TargetLibraryInfo.html) which 
essentially promotes information about the runtime. diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll index 2577a9de2f..ea4ead0400 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll @@ -6,14 +6,14 @@ source_filename = "qir/ConstSizeArray.ll" define internal fastcc void @Example__Main__body() unnamed_addr { entry: - call fastcc void @Example__QuantumFunction__body(i64 3) - call fastcc void @Example__QuantumFunction__body(i64 10) + call fastcc void @Example__QuantumProgram__body(i64 3) + call fastcc void @Example__QuantumProgram__body(i64 4) ret void } -define internal fastcc void @Example__QuantumFunction__body(i64 %nQubits) unnamed_addr { +define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %x) call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) call void @__quantum__rt__qubit_release_array(%Array* %qubits) diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll new file mode 100644 index 0000000000..65703f0d8e --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll @@ -0,0 +1,52 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3) + call fastcc void @Example__QuantumProgram__body(i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x) 
unnamed_addr { +entry: + %0 = mul i64 %x, %x + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll new file mode 100644 index 0000000000..420563f193 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll @@ -0,0 +1,80 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3, i64 2, i64 1) + call fastcc void @Example__QuantumProgram__body(i64 4, i64 9, i64 4) + ret 
void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x, i64 %h, i64 %g) unnamed_addr { +entry: + %.neg = xor i64 %x, -1 + %.neg1 = mul i64 %.neg, %x + %z.neg = add i64 %.neg1, 47 + %y = mul i64 %x, 3 + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, %g + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %h) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) + %4 = call fastcc i64 @Example__X__body(i64 %x) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +; Function Attrs: norecurse nounwind readnone willreturn +define internal fastcc i64 @Example__X__body(i64 %value) unnamed_addr #0 { +entry: + %0 = mul i64 %value, 3 + ret i64 %0 +} + +declare 
void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #2 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { norecurse nounwind readnone willreturn } +attributes #1 = { "InteropFriendly" } +attributes #2 = { "EntryPoint" } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp index 37c455961b..2a8d0eaab6 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp @@ -7,68 +7,240 @@ #include #include +#include namespace microsoft { namespace quantum { -ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run( - llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) + +bool ConstSizeArrayAnalysisAnalytics::operandsConstant(Instruction const &instruction) const { - ConstSizeArrayAnalysisAnalytics::Result result; + bool ret = true; - // Collect analytics here + // Checking that all oprands are constant + for (auto &op : instruction.operands()) + { - // Use analytics here - for (auto &basic_block : function) + auto const_arg = value_depending_on_args_.find(op) != value_depending_on_args_.end(); + auto cst = llvm::dyn_cast(op); + auto is_constant = (cst != nullptr); + + ret = ret && (const_arg || is_constant); + } + + return ret; +} + +void 
ConstSizeArrayAnalysisAnalytics::markPossibleConstant(Instruction &instruction) +{ + /* + // Rename constant variables + if (!instruction.hasName()) { - for (auto &instruction : basic_block) + // Naming result + char new_name[64] = {0}; + auto fmt = llvm::format("microsoft_reserved_possible_const_ret%u", tmp_counter_); + fmt.print(new_name, 64); + instruction.setName(new_name); + } + */ + + // Creating arg dependencies + ArgList all_dependencies{}; + for (auto &op : instruction.operands()) + { + auto it = value_depending_on_args_.find(op); + if (it != value_depending_on_args_.end()) { - // Skipping debug code - if (instruction.isDebugOrPseudoInst()) + for (auto &arg : it->second) { - continue; + all_dependencies.insert(arg); } + } + } - // Checking if it is a call instruction - auto *call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - continue; - } + // Adding the new name to the list + value_depending_on_args_.insert({&instruction, all_dependencies}); +} - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); +void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) +{ + // Skipping debug code + if (instruction.isDebugOrPseudoInst()) + { + return; + } - // TODO(tfr): Find a better way to inject runtime symbols - if (name != "__quantum__rt__qubit_allocate_array") - { - continue; - } + auto *call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + return; + } - // Validating that there exactly one argument - if (call_instr->arg_size() != 1) - { - continue; - } + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); - // Getting the size of the argument - auto size_value = call_instr->getArgOperand(0); - if (size_value == nullptr) - { - continue; - } + // TODO(tfr): Make use of TargetLibrary + if (name != "__quantum__rt__qubit_allocate_array") + { + return; + } + + if (call_instr->arg_size() != 1) + { + llvm::errs() 
<< "Expected exactly one argument\n"; + return; + } + + auto argument = call_instr->getArgOperand(0); + if (argument == nullptr) + { + llvm::errs() << "Failed getting the size argument\n"; + return; + } + + // Checking named values + auto it = value_depending_on_args_.find(argument); + if (it != value_depending_on_args_.end()) + { + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + qubit_array.depends_on = it->second; + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + return; + } + + // Checking if it is a constant value + auto cst = llvm::dyn_cast(argument); + if (cst != nullptr) + { + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + + return; + } + + // Non-static array + QubitArray qubit_array; + qubit_array.is_possibly_static = false; + qubit_array.variable_name = instruction.getName().str(); + results_.push_back(std::move(qubit_array)); +} + +void ConstSizeArrayAnalysisAnalytics::analyseFunction(llvm::Function &function) +{ + results_.clear(); + + // Creating a list with function arguments + for (auto &arg : function.args()) + { + auto s = arg.getName().str(); + value_depending_on_args_.insert({&arg, {s}}); + } + + // Evaluating all expressions + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) + { - // Checking if the value is constant - auto cst = llvm::dyn_cast(size_value); - if (cst == nullptr) + auto opcode = instruction.getOpcode(); + switch (opcode) { - continue; + case llvm::Instruction::Sub: + case llvm::Instruction::Add: + case llvm::Instruction::Mul: + case llvm::Instruction::Shl: + case llvm::Instruction::LShr: + case llvm::Instruction::AShr: + case llvm::Instruction::And: + case llvm::Instruction::Or: + case llvm::Instruction::Xor: + if (operandsConstant(instruction)) 
+ { + markPossibleConstant(instruction); + } + break; + case llvm::Instruction::Call: + analyseCall(instruction); + break; + // Unanalysed statements + case llvm::Instruction::Ret: + case llvm::Instruction::Br: + case llvm::Instruction::Switch: + case llvm::Instruction::IndirectBr: + case llvm::Instruction::Invoke: + case llvm::Instruction::Resume: + case llvm::Instruction::Unreachable: + case llvm::Instruction::CleanupRet: + case llvm::Instruction::CatchRet: + case llvm::Instruction::CatchSwitch: + case llvm::Instruction::CallBr: + case llvm::Instruction::FNeg: + case llvm::Instruction::FAdd: + case llvm::Instruction::FSub: + case llvm::Instruction::FMul: + case llvm::Instruction::UDiv: + case llvm::Instruction::SDiv: + case llvm::Instruction::FDiv: + case llvm::Instruction::URem: + case llvm::Instruction::SRem: + case llvm::Instruction::FRem: + case llvm::Instruction::Alloca: + case llvm::Instruction::Load: + case llvm::Instruction::Store: + case llvm::Instruction::GetElementPtr: + case llvm::Instruction::Fence: + case llvm::Instruction::AtomicCmpXchg: + case llvm::Instruction::AtomicRMW: + case llvm::Instruction::Trunc: + case llvm::Instruction::ZExt: + case llvm::Instruction::SExt: + case llvm::Instruction::FPToUI: + case llvm::Instruction::FPToSI: + case llvm::Instruction::UIToFP: + case llvm::Instruction::SIToFP: + case llvm::Instruction::FPTrunc: + case llvm::Instruction::FPExt: + case llvm::Instruction::PtrToInt: + case llvm::Instruction::IntToPtr: + case llvm::Instruction::BitCast: + case llvm::Instruction::AddrSpaceCast: + case llvm::Instruction::CleanupPad: + case llvm::Instruction::CatchPad: + case llvm::Instruction::ICmp: + case llvm::Instruction::FCmp: + case llvm::Instruction::PHI: + case llvm::Instruction::Select: + case llvm::Instruction::UserOp1: + case llvm::Instruction::UserOp2: + case llvm::Instruction::VAArg: + case llvm::Instruction::ExtractElement: + case llvm::Instruction::InsertElement: + case llvm::Instruction::ShuffleVector: + case 
llvm::Instruction::ExtractValue: + case llvm::Instruction::InsertValue: + case llvm::Instruction::LandingPad: + // End of Binary Ops + default: + break; } - - result[name] = cst->getValue().getSExtValue(); } } +} + +ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run( + llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) +{ + analyseFunction(function); - return result; + return results_; } ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream &out_stream) @@ -85,9 +257,14 @@ llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & out_stream_ << function.getName() << "\n"; out_stream_ << "====================" << "\n\n"; - for (auto const &size_info : results) + for (auto const &ret : results) { - out_stream_ << size_info.first() << ": " << size_info.second << "\n"; + out_stream_ << ret.variable_name << (ret.is_possibly_static ? ": " : "!"); + for (auto &x : ret.depends_on) + { + out_stream_ << x << ", "; + } + out_stream_ << "\n"; } } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp index b30abcf9b9..5e2c1b230e 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp @@ -4,6 +4,9 @@ #include "Llvm.hpp" +#include +#include + namespace microsoft { namespace quantum { @@ -11,7 +14,24 @@ class ConstSizeArrayAnalysisAnalytics : public llvm::AnalysisInfoMixin { public: - using Result = llvm::StringMap; ///< Change the type of the collected date here + using String = std::string; + using ArgList = std::unordered_set; + + struct QubitArray + { + bool is_possibly_static{false}; ///< Indicates whether the array is possibly static or not + String variable_name{}; ///< Name of the qubit array + ArgList depends_on{}; ///< Function arguments that determines if it is constant or not + }; + + using Value = 
llvm::Value; + using DependencyGraph = std::unordered_map; + using ValueDependencyGraph = std::unordered_map; + + using Instruction = llvm::Instruction; + using Function = llvm::Function; + using QubitArrayList = std::vector; + using Result = QubitArrayList; /// Constructors and destructors /// @{ @@ -32,9 +52,31 @@ class ConstSizeArrayAnalysisAnalytics Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); /// @} + /// Function analysis + /// @{ + void analyseFunction(llvm::Function &function); + /// @} + + /// Instruction analysis + /// @{ + bool operandsConstant(Instruction const &instruction) const; + void markPossibleConstant(Instruction &instruction); + void analyseCall(Instruction &instruction); + /// @} + private: static llvm::AnalysisKey Key; // NOLINT friend struct llvm::AnalysisInfoMixin; + + /// Analysis details + /// @{ + ValueDependencyGraph value_depending_on_args_{}; + /// @} + + /// Result + /// @{ + QubitArrayList results_{}; + /// @} }; class ConstSizeArrayAnalysisPrinter : public llvm::PassInfoMixin From bca2646b3d79ed83af9fa7bc42684142ab303c04 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 27 Jul 2021 12:56:43 +0200 Subject: [PATCH 037/106] Refactoring pass --- src/Passes/examples/ConstSizeArrayAnalysis/Makefile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile index 4050079dfe..7db0c3e199 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile +++ b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile @@ -1,7 +1,6 @@ run: build -# opt -load-pass-plugin ../../Debug/libs/libMetaData.dylib --passes="meta-data" -S analysis-problem-4.ll - opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem-4.ll + opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-problem-4.ll build: - pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make ConstSizeArrayAnalysis && popd || popd - pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make MetaData && popd || popd + pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make QubitAllocationAnalysis && popd || popd + From 75c22f6e0dfd65b92a4995276bd8a4173135d3d4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 27 Jul 2021 12:58:12 +0200 Subject: [PATCH 038/106] More refactoring --- .../ConstSizeArray/Comparison.cpp | 0 .../ConstSizeArray/ConstSizeArray.csproj | 0 .../ConstSizeArray/ConstSizeArray.qs | 0 .../ConstSizeArray/Makefile | 0 .../ConstSizeArray/comparison.ll | 0 .../Makefile | 0 .../README.md | 0 .../analysis-problem-1.ll | 0 .../analysis-problem-2.ll | 0 .../analysis-problem-3.ll | 0 .../analysis-problem-4.ll | 0 .../LibQubitAllocationAnalysis.cpp} | 16 ++-- .../QubitAllocationAnalysis.cpp} | 86 +++++++++++-------- .../QubitAllocationAnalysis.hpp} | 45 +++++----- .../SPECIFICATION.md | 0 15 files changed, 78 insertions(+), 69 deletions(-) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/Comparison.cpp (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/ConstSizeArray.csproj (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/ConstSizeArray.qs (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/Makefile (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/comparison.ll (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/Makefile (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/README.md (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/analysis-problem-1.ll (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/analysis-problem-2.ll (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/analysis-problem-3.ll (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/analysis-problem-4.ll (100%) rename src/Passes/libs/{ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp => 
QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp} (63%) rename src/Passes/libs/{ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp => QubitAllocationAnalysis/QubitAllocationAnalysis.cpp} (69%) rename src/Passes/libs/{ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp => QubitAllocationAnalysis/QubitAllocationAnalysis.hpp} (52%) rename src/Passes/libs/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/SPECIFICATION.md (100%) diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Comparison.cpp similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Comparison.cpp diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll 
b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/Makefile rename to src/Passes/examples/QubitAllocationAnalysis/Makefile diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/README.md b/src/Passes/examples/QubitAllocationAnalysis/README.md similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/README.md rename to src/Passes/examples/QubitAllocationAnalysis/README.md diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-1.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-1.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll similarity index 100% rename from 
src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp similarity index 63% rename from src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp rename to src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp index 91d766c9b8..358514329b 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp @@ -2,26 +2,26 @@ // Licensed under the MIT License. #include "Llvm.hpp" -#include "ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp" +#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include #include namespace { // Interface to plugin -llvm::PassPluginLibraryInfo getConstSizeArrayAnalysisPluginInfo() +llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() { using namespace microsoft::quantum; using namespace llvm; return { - LLVM_PLUGIN_API_VERSION, "ConstSizeArrayAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { + LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { // Registering a printer for the anaylsis pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { - if (name == "print") + if (name == "print") { - fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); return true; } return false; @@ -29,12 +29,12 @@ llvm::PassPluginLibraryInfo getConstSizeArrayAnalysisPluginInfo() pb.registerVectorizerStartEPCallback( [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); }); // 
Registering the analysis module pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - fam.registerPass([] { return ConstSizeArrayAnalysisAnalytics(); }); + fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); }); }}; } @@ -43,5 +43,5 @@ llvm::PassPluginLibraryInfo getConstSizeArrayAnalysisPluginInfo() extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getConstSizeArrayAnalysisPluginInfo(); + return getQubitAllocationAnalysisPluginInfo(); } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp similarity index 69% rename from src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp rename to src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 2a8d0eaab6..12dfce428d 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -1,9 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp" - #include "Llvm.hpp" +#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include #include @@ -12,15 +11,22 @@ namespace microsoft { namespace quantum { -bool ConstSizeArrayAnalysisAnalytics::operandsConstant(Instruction const &instruction) const +bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const &instruction) const { + // Default is true (i.e. the case of no operands) bool ret = true; // Checking that all oprands are constant for (auto &op : instruction.operands()) { - auto const_arg = value_depending_on_args_.find(op) != value_depending_on_args_.end(); + // An operand is constant if its value was previously generated from + // a const expression ... + auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); + + // ... 
or if it is just a compile time constant. Note that we + // delibrately only consider integers. We may expand this + // to other constants once we have function support. auto cst = llvm::dyn_cast(op); auto is_constant = (cst != nullptr); @@ -30,27 +36,18 @@ bool ConstSizeArrayAnalysisAnalytics::operandsConstant(Instruction const &instru return ret; } -void ConstSizeArrayAnalysisAnalytics::markPossibleConstant(Instruction &instruction) +void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction &instruction) { - /* - // Rename constant variables - if (!instruction.hasName()) - { - // Naming result - char new_name[64] = {0}; - auto fmt = llvm::format("microsoft_reserved_possible_const_ret%u", tmp_counter_); - fmt.print(new_name, 64); - instruction.setName(new_name); - } - */ - // Creating arg dependencies ArgList all_dependencies{}; for (auto &op : instruction.operands()) { - auto it = value_depending_on_args_.find(op); - if (it != value_depending_on_args_.end()) + // If the operand has dependecies ... + auto it = constantness_dependencies_.find(op); + if (it != constantness_dependencies_.end()) { + // ... 
we add these as a dependency for the + // resulting instructions value for (auto &arg : it->second) { all_dependencies.insert(arg); @@ -58,11 +55,11 @@ void ConstSizeArrayAnalysisAnalytics::markPossibleConstant(Instruction &instruct } } - // Adding the new name to the list - value_depending_on_args_.insert({&instruction, all_dependencies}); + // Adding full list of dependices to the dependency graph + constantness_dependencies_.insert({&instruction, all_dependencies}); } -void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) +void QubitAllocationAnalysisAnalytics::analyseCall(Instruction &instruction) { // Skipping debug code if (instruction.isDebugOrPseudoInst()) @@ -70,27 +67,32 @@ void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) return; } + // Recovering the call information auto *call_instr = llvm::dyn_cast(&instruction); if (call_instr == nullptr) { return; } + // Getting the name of the function being called auto target_function = call_instr->getCalledFunction(); auto name = target_function->getName(); - // TODO(tfr): Make use of TargetLibrary + // TODO(tfr): Make use of TargetLibraryInfo if (name != "__quantum__rt__qubit_allocate_array") { return; } + // We expect only a single argument with the number + // of qubits allocated if (call_instr->arg_size() != 1) { llvm::errs() << "Expected exactly one argument\n"; return; } + // Next we extract the argument ... auto argument = call_instr->getArgOperand(0); if (argument == nullptr) { @@ -98,10 +100,12 @@ void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) return; } - // Checking named values - auto it = value_depending_on_args_.find(argument); - if (it != value_depending_on_args_.end()) + // ... 
and checks whether it is a result of a dependant + // const expression + auto it = constantness_dependencies_.find(argument); + if (it != constantness_dependencies_.end()) { + // If it is, we add the details to the result list QubitArray qubit_array; qubit_array.is_possibly_static = true; qubit_array.variable_name = instruction.getName().str(); @@ -112,7 +116,8 @@ void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) return; } - // Checking if it is a constant value + // Otherwise, it may be a static allocation based on a constant (or + // folded constant) auto cst = llvm::dyn_cast(argument); if (cst != nullptr) { @@ -126,22 +131,26 @@ void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) return; } - // Non-static array + // If neither of the previous is the case, we are dealing with a non-static array QubitArray qubit_array; qubit_array.is_possibly_static = false; qubit_array.variable_name = instruction.getName().str(); + + // Storing the result results_.push_back(std::move(qubit_array)); } -void ConstSizeArrayAnalysisAnalytics::analyseFunction(llvm::Function &function) +void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function &function) { + // Clearing results generated in a previous run results_.clear(); + constantness_dependencies_.clear(); // Creating a list with function arguments for (auto &arg : function.args()) { auto s = arg.getName().str(); - value_depending_on_args_.insert({&arg, {s}}); + constantness_dependencies_.insert({&arg, {s}}); } // Evaluating all expressions @@ -149,7 +158,6 @@ void ConstSizeArrayAnalysisAnalytics::analyseFunction(llvm::Function &function) { for (auto &instruction : basic_block) { - auto opcode = instruction.getOpcode(); switch (opcode) { @@ -235,22 +243,24 @@ void ConstSizeArrayAnalysisAnalytics::analyseFunction(llvm::Function &function) } } -ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run( +QubitAllocationAnalysisAnalytics::Result 
QubitAllocationAnalysisAnalytics::run( llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) { + // Running functin analysis analyseFunction(function); + // ... and return the result. return results_; } -ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream &out_stream) +QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream) : out_stream_(out_stream) {} -llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) +llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) { - auto &results = fam.getResult(function); + auto &results = fam.getResult(function); if (!results.empty()) { @@ -271,12 +281,12 @@ llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & return llvm::PreservedAnalyses::all(); } -bool ConstSizeArrayAnalysisPrinter::isRequired() +bool QubitAllocationAnalysisPrinter::isRequired() { return true; } -llvm::AnalysisKey ConstSizeArrayAnalysisAnalytics::Key; +llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; } // namespace quantum } // namespace microsoft diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp similarity index 52% rename from src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp rename to src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index 5e2c1b230e..865e2f5fbb 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -10,8 +10,8 @@ namespace microsoft { namespace quantum { -class ConstSizeArrayAnalysisAnalytics - : public llvm::AnalysisInfoMixin +class QubitAllocationAnalysisAnalytics + : public llvm::AnalysisInfoMixin { public: using String = std::string; @@ -28,23 +28,22 @@ class 
ConstSizeArrayAnalysisAnalytics using DependencyGraph = std::unordered_map; using ValueDependencyGraph = std::unordered_map; - using Instruction = llvm::Instruction; - using Function = llvm::Function; - using QubitArrayList = std::vector; - using Result = QubitArrayList; + using Instruction = llvm::Instruction; + using Function = llvm::Function; + using Result = std::vector; /// Constructors and destructors /// @{ - ConstSizeArrayAnalysisAnalytics() = default; - ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics const &) = delete; - ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics &&) = default; - ~ConstSizeArrayAnalysisAnalytics() = default; + QubitAllocationAnalysisAnalytics() = default; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const &) = delete; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics &&) = default; + ~QubitAllocationAnalysisAnalytics() = default; /// @} /// Operators /// @{ - ConstSizeArrayAnalysisAnalytics &operator=(ConstSizeArrayAnalysisAnalytics const &) = delete; - ConstSizeArrayAnalysisAnalytics &operator=(ConstSizeArrayAnalysisAnalytics &&) = delete; + QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics const &) = delete; + QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics &&) = delete; /// @} /// Functions required by LLVM @@ -66,35 +65,35 @@ class ConstSizeArrayAnalysisAnalytics private: static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; + friend struct llvm::AnalysisInfoMixin; /// Analysis details /// @{ - ValueDependencyGraph value_depending_on_args_{}; + ValueDependencyGraph constantness_dependencies_{}; /// @} /// Result /// @{ - QubitArrayList results_{}; + Result results_{}; /// @} }; -class ConstSizeArrayAnalysisPrinter : public llvm::PassInfoMixin +class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin { public: /// Constructors and destructors /// @{ - explicit 
ConstSizeArrayAnalysisPrinter(llvm::raw_ostream &out_stream); - ConstSizeArrayAnalysisPrinter() = delete; - ConstSizeArrayAnalysisPrinter(ConstSizeArrayAnalysisPrinter const &) = delete; - ConstSizeArrayAnalysisPrinter(ConstSizeArrayAnalysisPrinter &&) = default; - ~ConstSizeArrayAnalysisPrinter() = default; + explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream); + QubitAllocationAnalysisPrinter() = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const &) = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter &&) = default; + ~QubitAllocationAnalysisPrinter() = default; /// @} /// Operators /// @{ - ConstSizeArrayAnalysisPrinter &operator=(ConstSizeArrayAnalysisPrinter const &) = delete; - ConstSizeArrayAnalysisPrinter &operator=(ConstSizeArrayAnalysisPrinter &&) = delete; + QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter const &) = delete; + QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter &&) = delete; /// @} /// Functions required by LLVM diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md b/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md similarity index 100% rename from src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md rename to src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md From ab7ddf64d19f7843d5d5b3bd52fcf74a7e85b39e Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 27 Jul 2021 13:42:44 +0200 Subject: [PATCH 039/106] Adding documenation --- .../ConstSizeArray/ConstSizeArray.qs | 2 +- .../ConstSizeArray/Makefile | 6 + .../ConstSizeArray/comparison.ll | 75 ------ .../examples/QubitAllocationAnalysis/Makefile | 11 +- .../QubitAllocationAnalysis/README.md | 235 +++++++++++------- ...lysis-problem-4.ll => analysis-example.ll} | 4 + .../analysis-problem-1.ll | 50 ---- .../analysis-problem-2.ll | 51 ---- .../analysis-problem-3.ll | 52 ---- .../QubitAllocationAnalysis.cpp | 33 ++- .../QubitAllocationAnalysis.hpp | 11 +- .../QubitAllocationAnalysis/SPECIFICATION.md | 9 + 12 files changed, 208 insertions(+), 331 deletions(-) delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll rename src/Passes/examples/QubitAllocationAnalysis/{analysis-problem-4.ll => analysis-example.ll} (91%) delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index ae895648ed..c8c78d1aea 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,5 +1,4 @@ namespace Example { - @EntryPoint() operation Main() : Int { @@ -17,6 +16,7 @@ namespace Example { let z = x * (x + 1) - 47; let y = 3 * x; + use qubits0 = Qubit[9]; use qubits1 = Qubit[(y - 2)/2-z]; use qubits2 = Qubit[y - g]; use qubits3 = Qubit[h]; diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile index fa97ab5b45..59399d367e 100644 --- 
a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile @@ -1,5 +1,11 @@ +analysis-example.ll: + dotnet build ConstSizeArray.csproj + opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll + make clean + comparison: clang++ -S -emit-llvm -std=c++14 -stdlib=libc++ Comparison.cpp -o comparison.ll + clean: rm -rf bin rm -rf obj diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll deleted file mode 100644 index 0e2c5308a3..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll +++ /dev/null @@ -1,75 +0,0 @@ -; ModuleID = 'Comparison.cpp' -source_filename = "Comparison.cpp" -target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" -target triple = "x86_64-apple-macosx11.0.0" - -; Function Attrs: noinline nounwind optnone ssp uwtable mustprogress -define dso_local void @_Z15QuantumFunctioni(i32 %0) #0 { - %2 = alloca i32, align 4 - %3 = alloca i64, align 8 - %4 = alloca i64, align 8 - %5 = alloca i8*, align 8 - %6 = alloca i64, align 8 - store i32 %0, i32* %2, align 4 - store volatile i64 3, i64* %3, align 8 - store i64 0, i64* %4, align 8 - br label %7 - -7: ; preds = %15, %1 - %8 = load i64, i64* %4, align 8 - %9 = load volatile i64, i64* %3, align 8 - %10 = icmp ult i64 %8, %9 - br i1 %10, label %11, label %18 - -11: ; preds = %7 - %12 = load i32, i32* %2, align 4 - %13 = load i32, i32* %2, align 4 - %14 = add nsw i32 %13, %12 - store i32 %14, i32* %2, align 4 - br label %15 - -15: ; preds = %11 - %16 = load i64, i64* %4, align 8 - %17 = add i64 %16, 1 - store i64 %17, i64* %4, align 8 - br label %7, !llvm.loop !3 - -18: ; preds = %7 - %19 = load i32, i32* %2, align 4 - %20 = zext i32 %19 to i64 - %21 = call i8* @llvm.stacksave() - store i8* %21, i8** %5, align 8 - %22 = alloca i32, i64 %20, align 16 - 
store i64 %20, i64* %6, align 8 - %23 = load i8*, i8** %5, align 8 - call void @llvm.stackrestore(i8* %23) - ret void -} - -; Function Attrs: nofree nosync nounwind willreturn -declare i8* @llvm.stacksave() #1 - -; Function Attrs: nofree nosync nounwind willreturn -declare void @llvm.stackrestore(i8*) #1 - -; Function Attrs: noinline norecurse nounwind optnone ssp uwtable mustprogress -define dso_local i32 @main() #2 { - %1 = alloca i32, align 4 - store i32 0, i32* %1, align 4 - call void @_Z15QuantumFunctioni(i32 10) - call void @_Z15QuantumFunctioni(i32 3) - ret i32 0 -} - -attributes #0 = { noinline nounwind optnone ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nofree nosync nounwind willreturn } -attributes #2 = { noinline norecurse nounwind optnone ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } - -!llvm.module.flags = !{!0, !1} -!llvm.ident = !{!2} - -!0 = !{i32 1, !"wchar_size", i32 4} -!1 = !{i32 7, !"PIC Level", i32 2} -!2 = !{!"Homebrew clang version 12.0.1"} -!3 = distinct !{!3, !4} -!4 = !{!"llvm.loop.mustprogress"} diff --git 
a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index 7db0c3e199..dd808878eb 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -1,6 +1,13 @@ -run: build - opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-problem-4.ll +run: build analysis-example.ll + opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll build: pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make QubitAllocationAnalysis && popd || popd + +analysis-example.ll: + cd ConstSizeArray && make analysis-example.ll + +clean: + cd ConstSizeArray && make clean + rm analysis-example.ll \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/README.md b/src/Passes/examples/QubitAllocationAnalysis/README.md index fba3828ece..383ee026ed 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/README.md +++ b/src/Passes/examples/QubitAllocationAnalysis/README.md @@ -1,160 +1,209 @@ -# ConstSizeArray +# QubitAllocationAnalysis -## Running the analysis +## Quick start -Ensure that you have build the latest version of the pass +The following depnds on: + +- A working LLVM installation, including paths correctly setup +- CMake +- C#, Q# and the .NET framework + +Running following command ```sh -% make build +% make run ``` +will first build the pass, then build the QIR using Q# following by removing the noise using `opt` with optimisation level 1. Finally, it will execute the analysis pass and should provide you with information about qubit allocation in the Q# program defined in `ConstSizeArray/ConstSizeArray.qs`. 
+ +## Detailed run + +From the Passes root (two levels up from this directory), make a new build + ```sh -% opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll +% mkdir Debug +% cd Debug +% cmake .. ``` -## Cases to consider +and then compile the `QubitAllocationAnalysis`: -```qsharp -namespace Example { +```sh +% make QubitAllocationAnalysis +``` - @EntryPoint() - operation Main() : Int - { - QuantumProgram(); - return 0; - } +Next return `examples/QubitAllocationAnalysis` and enter the directory `ConstSizeArray` to build the QIR: +```sh +% make analysis-example.ll +``` +or execute the commands manually, - operation QuantumProgram(x: Int) : Unit { - use qubits = Qubit[3]; - } +```sh +% dotnet build ConstSizeArray.csproj +% opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll +% make clean +``` -} +Returning to `examples/QubitAllocationAnalysis`, the pass can now be ran by executing: + +```sh +% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll ``` +## Example cases + +Below we will consider a few different examples. You can run them by updating the code in `ConstSizeArray/ConstSizeArray.qs` and executing `make run` from the `examples/QubitAllocationAnalysis` folder subsequently. You will need to delete `analysis-example.ll` between runs. 
+ +### Trivially constant + +This is the simplest example we can think of: + ```qsharp namespace Example { - @EntryPoint() - operation Main() : Int - { - QuantumProgram(3); - QuantumProgram(4); - return 0; + operation QuantumProgram() : Unit { + use qubits = Qubit[3]; } +} +``` +The corresponding QIR is: +``` +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" - operation QuantumProgram(x: Int) : Unit { - use qubits = Qubit[x]; - } +%Array = type opaque +define internal fastcc void @Example__QuantumProgram__body() unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void } + +; (...) ``` -```qsharp -namespace Example { +Running the pass procudes following output: - @EntryPoint() - operation Main() : Int - { - QuantumProgram(3); - QuantumProgram(4); - return 0; - } +``` +% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +Example__QuantumProgram__body +==================== +qubits is trivially static with 3 qubits. +``` - operation QuantumProgram(x: Int) : Unit { - use qubits = Qubit[x * x]; - } +### Dependency case -} -``` +In some cases, a qubit array will be compile time constant in size if the function arguments +provided are compile-time constants. 
One example of this is: -```qsharp +``` namespace Example { - @EntryPoint() operation Main() : Int { - QuantumProgram(ComputeNumberOfQubits); + QuantumProgram(3); + QuantumProgram(4); return 0; } - function ComputeNumberOfQubits(x: Int): Int { - return x * x; + operation QuantumProgram(x: Int) : Unit { + use qubits = Qubit[x]; } +} +``` - operation QuantumProgram(fnc : Int -> Int) : Unit { - use qubits = Qubit[fnc(3)]; - } +The corresponding QIR is -} ``` +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" -## Generating an example QIR +%Array = type opaque +%String = type opaque -Build the QIR +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3) + call fastcc void @Example__QuantumProgram__body(i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %x) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} +; ( ... ) -```sh -cd ConstSizeArray -dotnet build ConstSizeArray.csproj ``` -Strip it of unecessary information +The analyser returns following output: -```sh -opt -S qir/ConstSizeArray.ll -O1 > qir/Problem.ll ``` +% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll -Result should be similar to +Example__QuantumProgram__body +==================== -``` -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" +qubits depends on x being constant to be static. 
-%Array = type opaque -%String = type opaque +``` -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr +### Summary case -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr +Finally, we do a summary case that demonstrates some of the more elaborate cases: -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr +``` +namespace Example { + @EntryPoint() + operation Main() : Int + { + QuantumProgram(3,2,1); + QuantumProgram(4,9,4); + return 0; + } -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + function X(value: Int): Int + { + return 3 * value; + } -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - %qubits.i.i = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 10) - tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 1) - tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 -1) - tail call void @__quantum__rt__qubit_release_array(%Array* %qubits.i.i) - ret i64 0 -} + operation QuantumProgram(x: Int, h: Int, g: Int) : Unit { + let z = x * (x + 1) - 47; + let y = 3 * x; -define void @Example__Main() local_unnamed_addr #1 { -entry: - %qubits.i.i = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 10) - tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 1) - tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 -1) - tail call void @__quantum__rt__qubit_release_array(%Array* %qubits.i.i) - %0 = tail call %String* @__quantum__rt__int_to_string(i64 0) - tail call void @__quantum__rt__message(%String* %0) - tail call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void + use qubits0 = Qubit[9]; + use qubits1 = Qubit[(y - 2)/2-z]; + use qubits2 = Qubit[y - g]; + use qubits3 = Qubit[h]; + use qubits4 = Qubit[X(x)]; + } } +``` -declare void 
@__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr +We will omit the QIR in the documenation as it is a long. The output of the anaysis is: -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } ``` +% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll -# Notes +Example__QuantumProgram__body +==================== -To make a proper version of Const Size deduction, look at the [constant folding](https://llvm.org/doxygen/ConstantFolding_8cpp_source.html) implementation and in particular, the [target library](https://llvm.org/doxygen/classllvm_1_1TargetLibraryInfo.html) which essentially promotes information about the runtime. +qubits0 is trivially static with 9 qubits. +qubits1 depends on x being constant to be static. +qubits2 depends on x, g being constant to be static. +qubits3 depends on h being constant to be static. +qubits4 is dynamic. 
+``` diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll similarity index 91% rename from src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index 420563f193..b87010d9d2 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -17,6 +17,8 @@ entry: %.neg1 = mul i64 %.neg, %x %z.neg = add i64 %.neg1, 47 %y = mul i64 %x, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) %0 = add i64 %y, -2 %1 = lshr i64 %0, 1 %2 = add i64 %z.neg, %1 @@ -38,6 +40,8 @@ entry: call void @__quantum__rt__qubit_release_array(%Array* %qubits2) call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) ret void } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll deleted file mode 100644 index 01315ee268..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll +++ /dev/null @@ -1,50 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumFunction__body() - ret void -} - -define internal fastcc void @Example__QuantumFunction__body() unnamed_addr { -entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 10) - call void @__quantum__rt__array_update_alias_count(%Array* 
%qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll deleted file mode 100644 index ea4ead0400..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll +++ /dev/null @@ -1,51 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumProgram__body(i64 3) - call fastcc void @Example__QuantumProgram__body(i64 4) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { -entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %x) - call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll deleted file mode 100644 index 65703f0d8e..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll +++ /dev/null @@ -1,52 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumProgram__body(i64 3) - call fastcc void @Example__QuantumProgram__body(i64 4) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { -entry: - %0 = mul i64 %x, %x - %qubits = call %Array* 
@__quantum__rt__qubit_allocate_array(i64 %0) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 12dfce428d..0da8007336 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -1,9 +1,10 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm.hpp" #include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Llvm.hpp" + #include #include #include @@ -124,6 +125,7 @@ void QubitAllocationAnalysisAnalytics::analyseCall(Instruction &instruction) QubitArray qubit_array; qubit_array.is_possibly_static = true; qubit_array.variable_name = instruction.getName().str(); + qubit_array.size = cst->getZExtValue(); // Pushing to the result results_.push_back(std::move(qubit_array)); @@ -269,11 +271,34 @@ llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & << "\n\n"; for (auto const &ret : results) { - out_stream_ << ret.variable_name << (ret.is_possibly_static ? ": " : "!"); - for (auto &x : ret.depends_on) + if (!ret.is_possibly_static) + { + out_stream_ << ret.variable_name << " is dynamic.\n"; + } + else { - out_stream_ << x << ", "; + if (ret.depends_on.empty()) + { + out_stream_ << ret.variable_name << " is trivially static with " << ret.size + << " qubits."; + } + else + { + out_stream_ << ret.variable_name << " depends on "; + bool first = true; + for (auto &x : ret.depends_on) + { + if (!first) + { + out_stream_ << ", "; + } + out_stream_ << x; + first = false; + } + out_stream_ << " being constant to be static."; + } } + out_stream_ << "\n"; } } diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index 865e2f5fbb..a3ca7382b2 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -19,9 +19,14 @@ class QubitAllocationAnalysisAnalytics struct QubitArray { - bool is_possibly_static{false}; ///< Indicates whether the array is possibly static or not - String variable_name{}; ///< Name of the qubit array - ArgList depends_on{}; ///< Function arguments that determines if it is constant or not + bool is_possibly_static{false}; ///< Indicates whether the array is + /// 
possibly static or not + /// + String variable_name{}; ///< Name of the qubit array + ArgList depends_on{}; ///< Function arguments that + /// determines if it is constant or not + /// + uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. }; using Value = llvm::Value; diff --git a/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md b/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md index e69de29bb2..4c2781d605 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md +++ b/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md @@ -0,0 +1,9 @@ +# Qubit Allocation Analysis + +## Purpose + +The purpose of this pass is to analyse the code for qubit allocations and identify +the allocation dependency. This helps subsequent transfomation passes expand the code +to, for instance, eliminate loops and classical logic. This is desirable as the control +logic for some quantum computing systems may be limited and one may therefore wish +to reduce its complexity as much as possible at compile time. From 0ae8d26adc40db982923ffd9397137825ea42665 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 13:44:57 +0200 Subject: [PATCH 040/106] CI and style --- .../LibQubitAllocationAnalysis.cpp | 62 +- .../QubitAllocationAnalysis.cpp | 560 +++++++++--------- .../QubitAllocationAnalysis.hpp | 207 +++---- 3 files changed, 419 insertions(+), 410 deletions(-) diff --git a/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp index 358514329b..ac03bc1f41 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp @@ -2,46 +2,50 @@ // Licensed under the MIT License. 
#include "Llvm.hpp" + #include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include #include -namespace { +namespace +{ // Interface to plugin llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering a printer for the anaylsis - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "print") - { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); - }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); - }); - }}; + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, + [](PassBuilder& pb) + { + // Registering a printer for the anaylsis + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) + { + if (name == "print") + { + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) + { fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback( + [](FunctionAnalysisManager& fam) + { fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); }); + }}; } -} // namespace +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK 
::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getQubitAllocationAnalysisPluginInfo(); + return getQubitAllocationAnalysisPluginInfo(); } diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 0da8007336..60ef6aaeb7 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -1,317 +1,321 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" - #include "Llvm.hpp" +#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" + #include #include #include -namespace microsoft { -namespace quantum { - -bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const &instruction) const +namespace microsoft +{ +namespace quantum { - // Default is true (i.e. the case of no operands) - bool ret = true; - // Checking that all oprands are constant - for (auto &op : instruction.operands()) - { + bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const& instruction) const + { + // Default is true (i.e. the case of no operands) + bool ret = true; - // An operand is constant if its value was previously generated from - // a const expression ... - auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); + // Checking that all oprands are constant + for (auto& op : instruction.operands()) + { - // ... or if it is just a compile time constant. Note that we - // delibrately only consider integers. We may expand this - // to other constants once we have function support. - auto cst = llvm::dyn_cast(op); - auto is_constant = (cst != nullptr); + // An operand is constant if its value was previously generated from + // a const expression ... 
+ auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); - ret = ret && (const_arg || is_constant); - } + // ... or if it is just a compile time constant. Note that we + // delibrately only consider integers. We may expand this + // to other constants once we have function support. + auto cst = llvm::dyn_cast(op); + auto is_constant = (cst != nullptr); - return ret; -} + ret = ret && (const_arg || is_constant); + } -void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction &instruction) -{ - // Creating arg dependencies - ArgList all_dependencies{}; - for (auto &op : instruction.operands()) - { - // If the operand has dependecies ... - auto it = constantness_dependencies_.find(op); - if (it != constantness_dependencies_.end()) - { - // ... we add these as a dependency for the - // resulting instructions value - for (auto &arg : it->second) - { - all_dependencies.insert(arg); - } + return ret; } - } - - // Adding full list of dependices to the dependency graph - constantness_dependencies_.insert({&instruction, all_dependencies}); -} -void QubitAllocationAnalysisAnalytics::analyseCall(Instruction &instruction) -{ - // Skipping debug code - if (instruction.isDebugOrPseudoInst()) - { - return; - } - - // Recovering the call information - auto *call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - return; - } - - // Getting the name of the function being called - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); - - // TODO(tfr): Make use of TargetLibraryInfo - if (name != "__quantum__rt__qubit_allocate_array") - { - return; - } - - // We expect only a single argument with the number - // of qubits allocated - if (call_instr->arg_size() != 1) - { - llvm::errs() << "Expected exactly one argument\n"; - return; - } - - // Next we extract the argument ... 
- auto argument = call_instr->getArgOperand(0); - if (argument == nullptr) - { - llvm::errs() << "Failed getting the size argument\n"; - return; - } - - // ... and checks whether it is a result of a dependant - // const expression - auto it = constantness_dependencies_.find(argument); - if (it != constantness_dependencies_.end()) - { - // If it is, we add the details to the result list - QubitArray qubit_array; - qubit_array.is_possibly_static = true; - qubit_array.variable_name = instruction.getName().str(); - qubit_array.depends_on = it->second; - - // Pushing to the result - results_.push_back(std::move(qubit_array)); - return; - } - - // Otherwise, it may be a static allocation based on a constant (or - // folded constant) - auto cst = llvm::dyn_cast(argument); - if (cst != nullptr) - { - QubitArray qubit_array; - qubit_array.is_possibly_static = true; - qubit_array.variable_name = instruction.getName().str(); - qubit_array.size = cst->getZExtValue(); - - // Pushing to the result - results_.push_back(std::move(qubit_array)); - - return; - } - - // If neither of the previous is the case, we are dealing with a non-static array - QubitArray qubit_array; - qubit_array.is_possibly_static = false; - qubit_array.variable_name = instruction.getName().str(); - - // Storing the result - results_.push_back(std::move(qubit_array)); -} - -void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function &function) -{ - // Clearing results generated in a previous run - results_.clear(); - constantness_dependencies_.clear(); - - // Creating a list with function arguments - for (auto &arg : function.args()) - { - auto s = arg.getName().str(); - constantness_dependencies_.insert({&arg, {s}}); - } - - // Evaluating all expressions - for (auto &basic_block : function) - { - for (auto &instruction : basic_block) + void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction& instruction) { - auto opcode = instruction.getOpcode(); - switch (opcode) - { - case 
llvm::Instruction::Sub: - case llvm::Instruction::Add: - case llvm::Instruction::Mul: - case llvm::Instruction::Shl: - case llvm::Instruction::LShr: - case llvm::Instruction::AShr: - case llvm::Instruction::And: - case llvm::Instruction::Or: - case llvm::Instruction::Xor: - if (operandsConstant(instruction)) + // Creating arg dependencies + ArgList all_dependencies{}; + for (auto& op : instruction.operands()) { - markPossibleConstant(instruction); + // If the operand has dependecies ... + auto it = constantness_dependencies_.find(op); + if (it != constantness_dependencies_.end()) + { + // ... we add these as a dependency for the + // resulting instructions value + for (auto& arg : it->second) + { + all_dependencies.insert(arg); + } + } } - break; - case llvm::Instruction::Call: - analyseCall(instruction); - break; - // Unanalysed statements - case llvm::Instruction::Ret: - case llvm::Instruction::Br: - case llvm::Instruction::Switch: - case llvm::Instruction::IndirectBr: - case llvm::Instruction::Invoke: - case llvm::Instruction::Resume: - case llvm::Instruction::Unreachable: - case llvm::Instruction::CleanupRet: - case llvm::Instruction::CatchRet: - case llvm::Instruction::CatchSwitch: - case llvm::Instruction::CallBr: - case llvm::Instruction::FNeg: - case llvm::Instruction::FAdd: - case llvm::Instruction::FSub: - case llvm::Instruction::FMul: - case llvm::Instruction::UDiv: - case llvm::Instruction::SDiv: - case llvm::Instruction::FDiv: - case llvm::Instruction::URem: - case llvm::Instruction::SRem: - case llvm::Instruction::FRem: - case llvm::Instruction::Alloca: - case llvm::Instruction::Load: - case llvm::Instruction::Store: - case llvm::Instruction::GetElementPtr: - case llvm::Instruction::Fence: - case llvm::Instruction::AtomicCmpXchg: - case llvm::Instruction::AtomicRMW: - case llvm::Instruction::Trunc: - case llvm::Instruction::ZExt: - case llvm::Instruction::SExt: - case llvm::Instruction::FPToUI: - case llvm::Instruction::FPToSI: - case 
llvm::Instruction::UIToFP: - case llvm::Instruction::SIToFP: - case llvm::Instruction::FPTrunc: - case llvm::Instruction::FPExt: - case llvm::Instruction::PtrToInt: - case llvm::Instruction::IntToPtr: - case llvm::Instruction::BitCast: - case llvm::Instruction::AddrSpaceCast: - case llvm::Instruction::CleanupPad: - case llvm::Instruction::CatchPad: - case llvm::Instruction::ICmp: - case llvm::Instruction::FCmp: - case llvm::Instruction::PHI: - case llvm::Instruction::Select: - case llvm::Instruction::UserOp1: - case llvm::Instruction::UserOp2: - case llvm::Instruction::VAArg: - case llvm::Instruction::ExtractElement: - case llvm::Instruction::InsertElement: - case llvm::Instruction::ShuffleVector: - case llvm::Instruction::ExtractValue: - case llvm::Instruction::InsertValue: - case llvm::Instruction::LandingPad: - // End of Binary Ops - default: - break; - } + + // Adding full list of dependices to the dependency graph + constantness_dependencies_.insert({&instruction, all_dependencies}); } - } -} -QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( - llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) -{ - // Running functin analysis - analyseFunction(function); + void QubitAllocationAnalysisAnalytics::analyseCall(Instruction& instruction) + { + // Skipping debug code + if (instruction.isDebugOrPseudoInst()) + { + return; + } - // ... and return the result. 
- return results_; -} + // Recovering the call information + auto* call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + return; + } -QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream) - : out_stream_(out_stream) -{} + // Getting the name of the function being called + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); -llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) -{ - auto &results = fam.getResult(function); - - if (!results.empty()) - { - out_stream_ << function.getName() << "\n"; - out_stream_ << "====================" - << "\n\n"; - for (auto const &ret : results) + // TODO(tfr): Make use of TargetLibraryInfo + if (name != "__quantum__rt__qubit_allocate_array") + { + return; + } + + // We expect only a single argument with the number + // of qubits allocated + if (call_instr->arg_size() != 1) + { + llvm::errs() << "Expected exactly one argument\n"; + return; + } + + // Next we extract the argument ... + auto argument = call_instr->getArgOperand(0); + if (argument == nullptr) + { + llvm::errs() << "Failed getting the size argument\n"; + return; + } + + // ... 
and checks whether it is a result of a dependant + // const expression + auto it = constantness_dependencies_.find(argument); + if (it != constantness_dependencies_.end()) + { + // If it is, we add the details to the result list + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + qubit_array.depends_on = it->second; + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + return; + } + + // Otherwise, it may be a static allocation based on a constant (or + // folded constant) + auto cst = llvm::dyn_cast(argument); + if (cst != nullptr) + { + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + qubit_array.size = cst->getZExtValue(); + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + + return; + } + + // If neither of the previous is the case, we are dealing with a non-static array + QubitArray qubit_array; + qubit_array.is_possibly_static = false; + qubit_array.variable_name = instruction.getName().str(); + + // Storing the result + results_.push_back(std::move(qubit_array)); + } + + void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function& function) { - if (!ret.is_possibly_static) - { - out_stream_ << ret.variable_name << " is dynamic.\n"; - } - else - { - if (ret.depends_on.empty()) + // Clearing results generated in a previous run + results_.clear(); + constantness_dependencies_.clear(); + + // Creating a list with function arguments + for (auto& arg : function.args()) { - out_stream_ << ret.variable_name << " is trivially static with " << ret.size - << " qubits."; + auto s = arg.getName().str(); + constantness_dependencies_.insert({&arg, {s}}); } - else + + // Evaluating all expressions + for (auto& basic_block : function) { - out_stream_ << ret.variable_name << " depends on "; - bool first = true; - for (auto &x : ret.depends_on) - { - if (!first) + for 
(auto& instruction : basic_block) { - out_stream_ << ", "; + auto opcode = instruction.getOpcode(); + switch (opcode) + { + case llvm::Instruction::Sub: + case llvm::Instruction::Add: + case llvm::Instruction::Mul: + case llvm::Instruction::Shl: + case llvm::Instruction::LShr: + case llvm::Instruction::AShr: + case llvm::Instruction::And: + case llvm::Instruction::Or: + case llvm::Instruction::Xor: + if (operandsConstant(instruction)) + { + markPossibleConstant(instruction); + } + break; + case llvm::Instruction::Call: + analyseCall(instruction); + break; + // Unanalysed statements + case llvm::Instruction::Ret: + case llvm::Instruction::Br: + case llvm::Instruction::Switch: + case llvm::Instruction::IndirectBr: + case llvm::Instruction::Invoke: + case llvm::Instruction::Resume: + case llvm::Instruction::Unreachable: + case llvm::Instruction::CleanupRet: + case llvm::Instruction::CatchRet: + case llvm::Instruction::CatchSwitch: + case llvm::Instruction::CallBr: + case llvm::Instruction::FNeg: + case llvm::Instruction::FAdd: + case llvm::Instruction::FSub: + case llvm::Instruction::FMul: + case llvm::Instruction::UDiv: + case llvm::Instruction::SDiv: + case llvm::Instruction::FDiv: + case llvm::Instruction::URem: + case llvm::Instruction::SRem: + case llvm::Instruction::FRem: + case llvm::Instruction::Alloca: + case llvm::Instruction::Load: + case llvm::Instruction::Store: + case llvm::Instruction::GetElementPtr: + case llvm::Instruction::Fence: + case llvm::Instruction::AtomicCmpXchg: + case llvm::Instruction::AtomicRMW: + case llvm::Instruction::Trunc: + case llvm::Instruction::ZExt: + case llvm::Instruction::SExt: + case llvm::Instruction::FPToUI: + case llvm::Instruction::FPToSI: + case llvm::Instruction::UIToFP: + case llvm::Instruction::SIToFP: + case llvm::Instruction::FPTrunc: + case llvm::Instruction::FPExt: + case llvm::Instruction::PtrToInt: + case llvm::Instruction::IntToPtr: + case llvm::Instruction::BitCast: + case llvm::Instruction::AddrSpaceCast: + 
case llvm::Instruction::CleanupPad: + case llvm::Instruction::CatchPad: + case llvm::Instruction::ICmp: + case llvm::Instruction::FCmp: + case llvm::Instruction::PHI: + case llvm::Instruction::Select: + case llvm::Instruction::UserOp1: + case llvm::Instruction::UserOp2: + case llvm::Instruction::VAArg: + case llvm::Instruction::ExtractElement: + case llvm::Instruction::InsertElement: + case llvm::Instruction::ShuffleVector: + case llvm::Instruction::ExtractValue: + case llvm::Instruction::InsertValue: + case llvm::Instruction::LandingPad: + // End of Binary Ops + default: + break; + } } - out_stream_ << x; - first = false; - } - out_stream_ << " being constant to be static."; } - } + } - out_stream_ << "\n"; + QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) + { + // Running functin analysis + analyseFunction(function); + + // ... and return the result. + return results_; } - } - return llvm::PreservedAnalyses::all(); -} + QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) + { + } -bool QubitAllocationAnalysisPrinter::isRequired() -{ - return true; -} + llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& fam) + { + auto& results = fam.getResult(function); + + if (!results.empty()) + { + out_stream_ << function.getName() << "\n"; + out_stream_ << "====================" + << "\n\n"; + for (auto const& ret : results) + { + if (!ret.is_possibly_static) + { + out_stream_ << ret.variable_name << " is dynamic.\n"; + } + else + { + if (ret.depends_on.empty()) + { + out_stream_ << ret.variable_name << " is trivially static with " << ret.size << " qubits."; + } + else + { + out_stream_ << ret.variable_name << " depends on "; + bool first = true; + for (auto& x : ret.depends_on) + { + if (!first) + { + out_stream_ << ", "; + } + 
out_stream_ << x; + first = false; + } + out_stream_ << " being constant to be static."; + } + } + + out_stream_ << "\n"; + } + } + + return llvm::PreservedAnalyses::all(); + } + + bool QubitAllocationAnalysisPrinter::isRequired() + { + return true; + } -llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; + llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index a3ca7382b2..215388be56 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -7,108 +7,109 @@ #include #include -namespace microsoft { -namespace quantum { - -class QubitAllocationAnalysisAnalytics - : public llvm::AnalysisInfoMixin +namespace microsoft { -public: - using String = std::string; - using ArgList = std::unordered_set; - - struct QubitArray - { - bool is_possibly_static{false}; ///< Indicates whether the array is - /// possibly static or not - /// - String variable_name{}; ///< Name of the qubit array - ArgList depends_on{}; ///< Function arguments that - /// determines if it is constant or not - /// - uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. 
- }; - - using Value = llvm::Value; - using DependencyGraph = std::unordered_map; - using ValueDependencyGraph = std::unordered_map; - - using Instruction = llvm::Instruction; - using Function = llvm::Function; - using Result = std::vector; - - /// Constructors and destructors - /// @{ - QubitAllocationAnalysisAnalytics() = default; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const &) = delete; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics &&) = default; - ~QubitAllocationAnalysisAnalytics() = default; - /// @} - - /// Operators - /// @{ - QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics const &) = delete; - QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics &&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); - /// @} - - /// Function analysis - /// @{ - void analyseFunction(llvm::Function &function); - /// @} - - /// Instruction analysis - /// @{ - bool operandsConstant(Instruction const &instruction) const; - void markPossibleConstant(Instruction &instruction); - void analyseCall(Instruction &instruction); - /// @} - -private: - static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; - - /// Analysis details - /// @{ - ValueDependencyGraph constantness_dependencies_{}; - /// @} - - /// Result - /// @{ - Result results_{}; - /// @} -}; - -class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin +namespace quantum { -public: - /// Constructors and destructors - /// @{ - explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream); - QubitAllocationAnalysisPrinter() = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const &) = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter &&) = default; - ~QubitAllocationAnalysisPrinter() = default; - /// @} - - /// Operators - /// @{ - 
QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter const &) = delete; - QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter &&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} -private: - llvm::raw_ostream &out_stream_; -}; - -} // namespace quantum -} // namespace microsoft + + class QubitAllocationAnalysisAnalytics : public llvm::AnalysisInfoMixin + { + public: + using String = std::string; + using ArgList = std::unordered_set; + + struct QubitArray + { + bool is_possibly_static{false}; ///< Indicates whether the array is + /// possibly static or not + /// + String variable_name{}; ///< Name of the qubit array + ArgList depends_on{}; ///< Function arguments that + /// determines if it is constant or not + /// + uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. + }; + + using Value = llvm::Value; + using DependencyGraph = std::unordered_map; + using ValueDependencyGraph = std::unordered_map; + + using Instruction = llvm::Instruction; + using Function = llvm::Function; + using Result = std::vector; + + /// Constructors and destructors + /// @{ + QubitAllocationAnalysisAnalytics() = default; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const&) = delete; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics&&) = default; + ~QubitAllocationAnalysisAnalytics() = default; + /// @} + + /// Operators + /// @{ + QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics const&) = delete; + QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} + + /// Function analysis + /// @{ + void analyseFunction(llvm::Function& function); + 
/// @} + + /// Instruction analysis + /// @{ + bool operandsConstant(Instruction const& instruction) const; + void markPossibleConstant(Instruction& instruction); + void analyseCall(Instruction& instruction); + /// @} + + private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; + + /// Analysis details + /// @{ + ValueDependencyGraph constantness_dependencies_{}; + /// @} + + /// Result + /// @{ + Result results_{}; + /// @} + }; + + class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream); + QubitAllocationAnalysisPrinter() = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const&) = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter&&) = default; + ~QubitAllocationAnalysisPrinter() = default; + /// @} + + /// Operators + /// @{ + QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter const&) = delete; + QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + private: + llvm::raw_ostream& out_stream_; + }; + +} // namespace quantum +} // namespace microsoft From 436f5445cc0e0155a1c736410d1ed178862f9bee Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 27 Jul 2021 13:55:05 +0200 Subject: [PATCH 041/106] Adding documentation as per request --- src/Passes/docs/continous-integration.md | 91 ++++++++++++++++++++++++ src/Passes/docs/library-structure.md | 38 ++++++++++ 2 files changed, 129 insertions(+) create mode 100644 src/Passes/docs/continous-integration.md create mode 100644 src/Passes/docs/library-structure.md diff --git a/src/Passes/docs/continous-integration.md b/src/Passes/docs/continous-integration.md new file mode 100644 index 0000000000..08d05c0738 --- /dev/null +++ b/src/Passes/docs/continous-integration.md @@ -0,0 +1,91 @@ +# Continuous integration + +This component is the largest part of this PR. The continuous integration component includes: + +1. Style formatting to ensure that everything looks the same. This includes checking that relevant copyrights are in place. +2. Static analysis +3. Unit testing + +The automatic style enforcement is configurable with the ability to easily add or remove rules. Currently the source pipelines are defined as: + +```python +SOURCE_PIPELINES = [ + { + "name": "C++ Main", + "src": path.join(PROJECT_ROOT, "libs"), + + "pipelines": { + "hpp": [ + require_pragma_once, + enforce_cpp_license, + enforce_formatting + ], + "cpp": [ + enforce_cpp_license, + enforce_formatting + ] + } + }, + # ... +] +``` + +This part defines pipelines for `.hpp` files and `.cpp` files allowing the developer to add such requirements such as having copyright in the op of the source file and ensure that formatting follows that given by `.clang-format`. + +Each of these CI stages can executed individually using `./manage` or you can run the entire CI process by invoking `./manage runci`. An example of what this may look like is here: + +```zsh +% ./manage runci + +2021-07-21 14:38:04,896 - FormatChecker - ERROR - /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp was not correctly formatted. 
+2021-07-21 14:38:04,899 - FormatChecker - ERROR - Your code did not pass formatting. + +% ./manage stylecheck --fix-issues +% ./manage runci + +-- Found LLVM 11.1.0 +-- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug +Consolidate compiler generated dependencies of target QSharpPasses +[ 50%] Building CXX object CMakeFiles/QSharpPasses.dir/src/OpsCounter/OpsCounter.cpp.o +[100%] Linking CXX shared library libQSharpPasses.dylib +ld: warning: directory not found for option '-L/usr/local/opt/llvm/lib' +[100%] Built target QSharpPasses +/Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp:29:7: error: invalid case style for class 'LegacyOpsCounterPass' [readability-identifier-naming,-warnings-as-errors] +class LegacyOpsCounterPass : public FunctionPass + ^~~~~~~~~~~~~~~~~~~~ + CLegacyOpsCounterPass +113345 warnings generated. +Suppressed 113345 warnings (113344 in non-user code, 1 NOLINT). +Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well. 
+1 warning treated as error +2021-07-21 14:38:40,191 - Linter - ERROR - /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp failed static analysis + +# ISSUES FIXED MANUALLY +% ./manage runci + +-- Found LLVM 11.1.0 +-- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug +Consolidate compiler generated dependencies of target QSharpPasses +[ 50%] Building CXX object CMakeFiles/QSharpPasses.dir/src/OpsCounter/OpsCounter.cpp.o +[100%] Linking CXX shared library libQSharpPasses.dylib +ld: warning: directory not found for option '-L/usr/local/opt/llvm/lib' +[100%] Built target QSharpPasses +-- Found LLVM 11.1.0 +-- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug +Consolidate compiler generated dependencies of target QSharpPasses +[100%] Built target QSharpPasses +********************************* +No test configuration file found! +********************************* +``` + +The key idea here is to make it extremely easy to be complaint with the style guide, correct any issues that might come as a result of static analysis and at the same time enforce this when a PR is made. diff --git a/src/Passes/docs/library-structure.md b/src/Passes/docs/library-structure.md new file mode 100644 index 0000000000..d1977cb0e1 --- /dev/null +++ b/src/Passes/docs/library-structure.md @@ -0,0 +1,38 @@ +# Library structure for passes + +An important part of this PR is that it proposes a structure for passes: It is suggested that each pass has their own subcode base. The reason for this proposal is that it makes it very easy to add and remove passes as well as decide which passes to link against. 
Each pass is kept in its own subdirectory under `libs`: + +``` +libs +├── CMakeLists.txt +└── OpsCounter + ├── OpsCounter.cpp + └── OpsCounter.hpp +``` + +Adding a new pass is easy using the `manage` tool developed in this PR: + +``` +% ./manage create-pass HelloWorld +Available templates: + +1. Function Pass + +Select a template:1 +``` + +which results in a new pass code in the `libs`: + +``` +libs +├── CMakeLists.txt +├── HelloWorld +│ ├── HelloWorld.cpp +│ ├── HelloWorld.hpp +│ └── SPECIFICATION.md +└── OpsCounter + ├── OpsCounter.cpp + └── OpsCounter.hpp +``` + +A full example of how to create a basic function pass is included in the README.md file for anyone interested. From ee7a0972f638c58c826c6e2013ce91e9f8c87793 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 13:59:49 +0200 Subject: [PATCH 042/106] Minor documentation update --- src/Passes/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Passes/README.md b/src/Passes/README.md index e8107da1e8..6419c757b0 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -185,7 +185,7 @@ and then make your target make [target] ``` -Valid targets are the name of the folders in `libs/` found in the passes root. +The default target is `all`. Other valid targets are the name of the folders in `libs/` found in the passes root. ## Running a pass From 162d5ce7070db1bc55495de945ba67de375fdcc2 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 27 Jul 2021 15:35:42 +0200 Subject: [PATCH 043/106] Adding lit tests --- src/Passes/CMakeLists.txt | 2 +- src/Passes/docs/continous-integration.md | 14 +++++ src/Passes/requirements.txt | 1 + src/Passes/site-packages/TasksCI/builder.py | 15 +++++- src/Passes/tests/CMakeLists.txt | 10 ++++ .../tests/QubitAllocationAnalysis/case1.ll | 15 ++++++ .../tests/QubitAllocationAnalysis/case2.ll | 14 +++++ .../inputs/static-qubit-arrays-1.ll | 51 +++++++++++++++++++ .../inputs/static-qubit-arrays-2.ll} | 0 src/Passes/tests/lit.cfg.py | 37 ++++++++++++++ src/Passes/tests/lit.site.cfg.py.in | 15 ++++++ 11 files changed, 172 insertions(+), 2 deletions(-) create mode 100644 src/Passes/tests/CMakeLists.txt create mode 100644 src/Passes/tests/QubitAllocationAnalysis/case1.ll create mode 100644 src/Passes/tests/QubitAllocationAnalysis/case2.ll create mode 100644 src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll rename src/Passes/{examples/QubitAllocationAnalysis/analysis-example.ll => tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.ll} (100%) create mode 100644 src/Passes/tests/lit.cfg.py create mode 100644 src/Passes/tests/lit.site.cfg.py.in diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index 0a55f495dd..f7ccafaa22 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -46,4 +46,4 @@ include_directories(${CMAKE_SOURCE_DIR}/src) # Adding the libraries add_subdirectory(libs) - +add_subdirectory(tests) \ No newline at end of file diff --git a/src/Passes/docs/continous-integration.md b/src/Passes/docs/continous-integration.md index 08d05c0738..30cc273ef5 100644 --- a/src/Passes/docs/continous-integration.md +++ b/src/Passes/docs/continous-integration.md @@ -1,3 +1,17 @@ +# Running tests + +In order to run the tests, you first need to build the library. 
Assuming that this is already done and the corresponding build is in `Debug/`, run the tests from the `Debug` folder: + +``` +% lit tests/ -v +-- Testing: 2 tests, 2 workers -- +PASS: Quantum-Passes :: QubitAllocationAnalysis/case1.ll (1 of 2) +PASS: Quantum-Passes :: QubitAllocationAnalysis/case2.ll (2 of 2) + +Testing Time: 0.27s + Passed: 2 +``` + # Continuous integration This component is the largest part of this PR. The continuous integration component includes: diff --git a/src/Passes/requirements.txt b/src/Passes/requirements.txt index 77c1d85ae8..b937a83e88 100644 --- a/src/Passes/requirements.txt +++ b/src/Passes/requirements.txt @@ -1 +1,2 @@ click==8.0.1 +lit==12.0.1 diff --git a/src/Passes/site-packages/TasksCI/builder.py b/src/Passes/site-packages/TasksCI/builder.py index bd6b574b04..f11df780b9 100644 --- a/src/Passes/site-packages/TasksCI/builder.py +++ b/src/Passes/site-packages/TasksCI/builder.py @@ -64,7 +64,17 @@ def run_tests(build_dir: str, concurrency=None): """ Runs the unit tests given a build directory. 
""" + fail = False + # Running lit tests + lit_cmd = ["lit", "tests/", "-v"] + exit_code = subprocess.call(lit_cmd, cwd=build_dir) + + if exit_code != 0: + logger.error('Lit test failed') + fail = True + + # Running CMake tests cmake_cmd = [toolchain.discover_ctest()] if concurrency is not None: @@ -72,7 +82,10 @@ def run_tests(build_dir: str, concurrency=None): exit_code = subprocess.call(cmake_cmd, cwd=build_dir) if exit_code != 0: - logger.error('Failed to configure project') + logger.error('CTest failed project') + fail = True + + if fail: sys.exit(exit_code) diff --git a/src/Passes/tests/CMakeLists.txt b/src/Passes/tests/CMakeLists.txt new file mode 100644 index 0000000000..60d980f867 --- /dev/null +++ b/src/Passes/tests/CMakeLists.txt @@ -0,0 +1,10 @@ +set(LT_TEST_SHLIBEXT "${CMAKE_SHARED_LIBRARY_SUFFIX}") + +set(LT_TEST_SITE_CFG_INPUT "${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in") +set(LT_TEST_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}") + +set(LIT_SITE_CFG_IN_HEADER "## Autogenerated from ${LT_TEST_SITE_CFG_INPUT}\n## Do not edit!") + +configure_file("${LT_TEST_SITE_CFG_INPUT}" + "${CMAKE_CURRENT_BINARY_DIR}/lit.cfg.py" @ONLY +) diff --git a/src/Passes/tests/QubitAllocationAnalysis/case1.ll b/src/Passes/tests/QubitAllocationAnalysis/case1.ll new file mode 100644 index 0000000000..cab5557980 --- /dev/null +++ b/src/Passes/tests/QubitAllocationAnalysis/case1.ll @@ -0,0 +1,15 @@ +; RUN: opt -load-pass-plugin %shlibdir/libQubitAllocationAnalysis%shlibext -passes="print" %S/inputs/static-qubit-arrays-1.ll -disable-output 2>&1\ +; RUN: | FileCheck %s + +;------------------------------------------------------------------------------ +; EXPECTED OUTPUT +;------------------------------------------------------------------------------ + +; CHECK: Example__QuantumProgram__body +; CHECK: ==================== + +; CHECK: qubits depends on x being constant to be static. 
+ + + + diff --git a/src/Passes/tests/QubitAllocationAnalysis/case2.ll b/src/Passes/tests/QubitAllocationAnalysis/case2.ll new file mode 100644 index 0000000000..7f90c61a50 --- /dev/null +++ b/src/Passes/tests/QubitAllocationAnalysis/case2.ll @@ -0,0 +1,14 @@ +; RUN: opt -load-pass-plugin %shlibdir/libQubitAllocationAnalysis%shlibext -passes="print" %S/inputs/static-qubit-arrays-2.ll -disable-output 2>&1\ +; RUN: | FileCheck %s + +;------------------------------------------------------------------------------ +; EXPECTED OUTPUT +;------------------------------------------------------------------------------ + +; CHECK: Example__QuantumProgram__body +; CHECK: ==================== +; CHECK: qubits0 is trivially static with 9 qubits. +; CHECK: qubits1 depends on x being constant to be static. +; CHECK: qubits2 depends on x, g being constant to be static. +; CHECK: qubits3 depends on h being constant to be static. +; CHECK: qubits4 is dynamic. diff --git a/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll new file mode 100644 index 0000000000..ea4ead0400 --- /dev/null +++ b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll @@ -0,0 +1,51 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3) + call fastcc void @Example__QuantumProgram__body(i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %x) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret 
void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.ll similarity index 100% rename from src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll rename to src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.ll diff --git a/src/Passes/tests/lit.cfg.py b/src/Passes/tests/lit.cfg.py new file mode 100644 index 0000000000..68e1c797f7 --- /dev/null +++ b/src/Passes/tests/lit.cfg.py @@ -0,0 +1,37 @@ +# -*- Python -*- +import platform +import lit.formats +from lit.llvm import llvm_config +from lit.llvm.subst import ToolSubst +import shutil + +config.llvm_tools_dir = os.path.dirname(shutil.which("opt")) +config.name = 'Quantum-Passes' +config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell) +config.suffixes = ['.ll'] +config.test_source_root = os.path.dirname(__file__) +config.excludes = ['inputs', "*/inputs", "**/inputs"] + +if platform.system() == 
'Darwin': + tool_substitutions = [ + ToolSubst('%clang', "clang", + extra_args=["-isysroot", + "`xcrun --show-sdk-path`", + "-mlinker-version=0"]), + ] +else: + tool_substitutions = [ + ToolSubst('%clang', "clang", + ) + ] +llvm_config.add_tool_substitutions(tool_substitutions) +tools = ["opt", "lli", "not", "FileCheck", "clang"] +llvm_config.add_tool_substitutions(tools, config.llvm_tools_dir) +config.substitutions.append(('%shlibext', config.llvm_shlib_ext)) +config.substitutions.append(('%shlibdir', config.llvm_shlib_dir)) + + +# References: +# https://github.com/banach-space/llvm-tutor +# http://lists.llvm.org/pipermail/cfe-dev/2016-July/049868.html +# https://github.com/Homebrew/homebrew-core/issues/52461 diff --git a/src/Passes/tests/lit.site.cfg.py.in b/src/Passes/tests/lit.site.cfg.py.in new file mode 100644 index 0000000000..c6888bfbf2 --- /dev/null +++ b/src/Passes/tests/lit.site.cfg.py.in @@ -0,0 +1,15 @@ +import sys + +config.llvm_tools_dir = "@LT_LLVM_INSTALL_DIR@/bin" +config.llvm_shlib_ext = "@LT_TEST_SHLIBEXT@" +config.llvm_shlib_dir = "@CMAKE_BINARY_DIR@/libs" + +import lit.llvm +# lit_config is a global instance of LitConfig +lit.llvm.initialize(lit_config, config) + +# test_exec_root: The root path where tests should be run. +config.test_exec_root = os.path.join("@CMAKE_CURRENT_BINARY_DIR@") + +# Let the main config do the real work. +lit_config.load_config(config, "@LT_TEST_SRC_DIR@/lit.cfg.py") From c5a1e84b8737fe0c61a74f9e132703048b34a0df Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 28 Jul 2021 16:35:23 +0200 Subject: [PATCH 044/106] Finalising expansion pass --- .../ConstSizeArray/ConstSizeArray.qs | 7 +- .../examples/QubitAllocationAnalysis/Makefile | 20 +- .../examples/QubitAllocationAnalysis/test.txt | 199 ++++++++++++++++ src/Passes/include/Llvm.hpp | 16 +- .../ExpandStaticAllocation.cpp | 223 ++++++++++++++++++ .../ExpandStaticAllocation.hpp | 46 ++++ .../LibExpandStaticAllocation.cpp | 38 +++ .../ExpandStaticAllocation/SPECIFICATION.md | 1 + 8 files changed, 544 insertions(+), 6 deletions(-) create mode 100644 src/Passes/examples/QubitAllocationAnalysis/test.txt create mode 100644 src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp create mode 100644 src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp create mode 100644 src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp create mode 100644 src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index c8c78d1aea..0b5655ddec 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -2,8 +2,9 @@ namespace Example { @EntryPoint() operation Main() : Int { + QuantumProgram(3,2,1); - QuantumProgram(4,9,4); + QuantumProgram(4,X(2),4); return 0; } @@ -21,5 +22,9 @@ namespace Example { use qubits2 = Qubit[y - g]; use qubits3 = Qubit[h]; use qubits4 = Qubit[X(x)]; + + for idxIteration in 0..g { + //Message(idxIteration); + } } } \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index dd808878eb..feedf8753f 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -1,8 +1,20 @@ -run: 
build analysis-example.ll +run-expand: build-qaa build-esa analysis-example.ll + opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib \ + -load-pass-plugin ../../Debug/libs/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll + + +run: build-qaa analysis-example.ll opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll -build: - pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make QubitAllocationAnalysis && popd || popd + +build-prepare: + pushd ../../ && mkdir -p Debug && cd Debug && cmake ..&& popd || popd + +build-qaa: build-prepare + pushd ../../Debug && make QubitAllocationAnalysis && popd || popd + +build-esa: build-prepare + pushd ../../Debug && make ExpandStaticAllocation && popd || popd analysis-example.ll: @@ -10,4 +22,4 @@ analysis-example.ll: clean: cd ConstSizeArray && make clean - rm analysis-example.ll \ No newline at end of file + rm analysis-example.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.txt b/src/Passes/examples/QubitAllocationAnalysis/test.txt new file mode 100644 index 0000000000..133f3304be --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/test.txt @@ -0,0 +1,199 @@ +pushd ../../ && mkdir -p Debug && cd Debug && cmake ..&& popd || popd +~/Documents/Projects/qsharp-compiler/src/Passes ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +-- Found LLVM 12.0.1 +-- Using LLVMConfig.cmake in: /usr/local/opt/llvm/lib/cmake/llvm +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/Passes/Debug +~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +pushd ../../Debug && make QubitAllocationAnalysis && popd || popd +~/Documents/Projects/qsharp-compiler/src/Passes/Debug ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis 
+Consolidate compiler generated dependencies of target QubitAllocationAnalysis +[100%] Built target QubitAllocationAnalysis +~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +pushd ../../Debug && make ExpandStaticAllocation && popd || popd +~/Documents/Projects/qsharp-compiler/src/Passes/Debug ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +Consolidate compiler generated dependencies of target ExpandStaticAllocation +[100%] Built target ExpandStaticAllocation +~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib \ + -load-pass-plugin ../../Debug/libs/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll +; ModuleID = 'analysis-example.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3, i64 2, i64 1) + call fastcc void @Example__QuantumProgram__body(i64 4, i64 9, i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x, i64 %h, i64 %g) unnamed_addr { +entry: + %.neg = xor i64 %x, -1 + %.neg1 = mul i64 %.neg, %x + %z.neg = add i64 %.neg1, 47 + %y = mul i64 %x, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, %g + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %h) + 
call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) + %4 = call fastcc i64 @Example__X__body(i64 %x) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + br label %header__1 + +header__1: ; preds = %header__1, %entry + %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] + %.not = icmp sgt i64 %idxIteration, %g + %5 = add i64 %idxIteration, 1 + br i1 %.not, label %exit__1, label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +; Function Attrs: norecurse nounwind readnone willreturn +define internal fastcc i64 @Example__X__body(i64 %value) unnamed_addr #0 { +entry: + %0 = mul i64 %value, 3 + ret i64 %0 +} + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() 
local_unnamed_addr #2 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +define internal fastcc void @Example__QuantumProgram__body.1() unnamed_addr { +entry: + %.neg = xor i64 3, -1 + %.neg1 = mul i64 %.neg, 3 + %z.neg = add i64 %.neg1, 47 + %y = mul i64 3, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, 1 + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) + %4 = call fastcc i64 @Example__X__body(i64 3) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + br label %header__1 + +header__1: ; preds = %header__1, %entry + %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] + %.not = icmp sgt i64 %idxIteration, 1 + %5 = add i64 %idxIteration, 1 + br i1 %.not, label %exit__1, label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call 
void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body.2() unnamed_addr { +entry: + %.neg = xor i64 4, -1 + %.neg1 = mul i64 %.neg, 4 + %z.neg = add i64 %.neg1, 47 + %y = mul i64 4, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, 4 + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) + %4 = call fastcc i64 @Example__X__body(i64 4) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + br label %header__1 + +header__1: ; preds = %header__1, %entry + %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] + %.not = icmp sgt i64 %idxIteration, 4 + %5 = add i64 %idxIteration, 1 + br i1 %.not, label %exit__1, label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* 
%qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) + ret void +} + +attributes #0 = { norecurse nounwind readnone willreturn } +attributes #1 = { "InteropFriendly" } +attributes #2 = { "EntryPoint" } diff --git a/src/Passes/include/Llvm.hpp b/src/Passes/include/Llvm.hpp index f24aef3726..80a4728b83 100644 --- a/src/Passes/include/Llvm.hpp +++ b/src/Passes/include/Llvm.hpp @@ -27,10 +27,24 @@ #pragma clang diagnostic ignored "-Weverything" #endif -#include "llvm/IR/LegacyPassManager.h" +// Passes #include "llvm/Passes/PassBuilder.h" #include "llvm/Passes/PassPlugin.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Cloning.h" + +// Building +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Verifier.h" #if defined(__clang__) #pragma clang diagnostic pop diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp new file mode 100644 index 0000000000..154c4f9617 --- /dev/null +++ b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -0,0 +1,223 @@ +// Copyright (c) Microsoft Corporation. 
+// Licensed under the MIT License. + +#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) +{ + // Pass body + for (auto &basic_block : function) + { + // Keeping track of instructions to remove in each block + std::vector to_remove; + + for (auto &instruction : basic_block) + { + // Finding calls + auto *call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + continue; + } + + ConstantArguments argument_constants{}; + std::vector remaining_arguments{}; + + auto callee_function = call_instr->getCalledFunction(); + auto &depenency_graph = fam.getResult(*callee_function); + + if (depenency_graph.size() > 0) + { + uint32_t idx = 0; + uint32_t N = static_cast(callee_function->arg_size()); + + // Finding argument constants + while (idx < N) + { + auto arg = callee_function->getArg(idx); + auto value = call_instr->getArgOperand(idx); + + auto cst = llvm::dyn_cast(value); + if (cst != nullptr) + { + argument_constants[arg->getName().str()] = cst; + } + else + { + remaining_arguments.push_back(idx); + } + + ++idx; + } + + // Checking which arrays are constant for this + auto new_callee = ExpandFunctionCall(depenency_graph, *callee_function, argument_constants); + + // Replacing call if a new function was created + if (new_callee != nullptr) + { + llvm::IRBuilder<> builder(call_instr); + (void)call_instr; + + // List with new call arguments + std::vector new_arguments; + for (auto const &i : remaining_arguments) + { + // Getting the i'th argument + llvm::Value *arg = call_instr->getArgOperand(i); + + // Adding arguments that were not constant + if (argument_constants.find(arg->getName().str()) == argument_constants.end()) + { + new_arguments.push_back(arg); + } + } + + // Creating a new call + llvm::Value *new_call = 
builder.CreateCall(new_callee, new_arguments); + + // Replace all calls to old function with calls to new function + for (auto &use : call_instr->uses()) + { + llvm::User *user = use.getUser(); + user->setOperand(use.getOperandNo(), new_call); + } + + // Schedule original instruction for deletion + to_remove.push_back(&instruction); + } + } + } + + // Removing instructions + for (auto &instruction : to_remove) + { + if (!instruction->use_empty()) + { + instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); + } + instruction->eraseFromParent(); + } + } + + return llvm::PreservedAnalyses::none(); +} + +llvm::Function *ExpandStaticAllocationPass::ExpandFunctionCall( + QubitAllocationResult const &depenency_graph, llvm::Function &callee, + ConstantArguments const &const_args) +{ + bool should_replace_function = false; + if (!depenency_graph.empty()) + { + // Checking that any of all allocations in the function + // body becomes static from replacing constant function arguments + for (auto const &allocation : depenency_graph) + { + // Ignoring non-static allocations + if (!allocation.is_possibly_static) + { + continue; + } + + // Ignoring trivial allocations + if (allocation.depends_on.empty()) + { + continue; + } + + // Checking all dependencies are constant + bool all_const = true; + for (auto &name : allocation.depends_on) + { + all_const = all_const && (const_args.find(name) != const_args.end()); + } + + // In case that all dependencies are constant for this + // allocation, we should replace the function with one where + // the arguments are eliminated. 
+ if (all_const) + { + should_replace_function = true; + } + } + } + + // Replacing function if needed + if (should_replace_function) + { + auto module = callee.getParent(); + auto & context = module->getContext(); + llvm::IRBuilder<> builder(context); + + // Copying the original function + llvm::ValueToValueMapTy remapper; + std::vector arg_types; + + // The user might be deleting arguments to the function by specifying them in + // the VMap. If so, we need to not add the arguments to the arg ty vector + // + for (auto const &arg : callee.args()) + { + // Skipping constant arguments + + if (const_args.find(arg.getName().str()) != const_args.end()) + { + continue; + } + + arg_types.push_back(arg.getType()); + } + + // Creating a new function + llvm::FunctionType *function_type = llvm::FunctionType::get( + callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); + auto function = llvm::Function::Create(function_type, callee.getLinkage(), + callee.getAddressSpace(), callee.getName(), module); + + // Copying the non-const arguments + auto dest_args_it = function->arg_begin(); + + for (auto const &arg : callee.args()) + { + auto const_it = const_args.find(arg.getName().str()); + if (const_it == const_args.end()) + { + // Mapping remaining function arguments + dest_args_it->setName(arg.getName()); + remapper[&arg] = &*dest_args_it++; + } + else + { + remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); + } + } + + llvm::SmallVector returns; // Ignore returns cloned. 
+ + // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' + llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); + + verifyFunction(*function); + + return function; + } + + return nullptr; +} + +bool ExpandStaticAllocationPass::isRequired() +{ + return true; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp new file mode 100644 index 0000000000..8166737e27 --- /dev/null +++ b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -0,0 +1,46 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm.hpp" +#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" + +#include + +namespace microsoft { +namespace quantum { + +class ExpandStaticAllocationPass : public llvm::PassInfoMixin +{ +public: + using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; + using ConstantArguments = std::unordered_map; + + /// Constructors and destructors + /// @{ + ExpandStaticAllocationPass() = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass const &) = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass &&) = default; + ~ExpandStaticAllocationPass() = default; + /// @} + + /// Operators + /// @{ + ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass const &) = default; + ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass &&) = default; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} + + /// @{ + llvm::Function *ExpandFunctionCall(QubitAllocationResult const &depenency_graph, + llvm::Function &callee, ConstantArguments const &const_args); + /// @} +}; + +} // namespace quantum +} // namespace 
microsoft diff --git a/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp b/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp new file mode 100644 index 0000000000..2475aaf8cd --- /dev/null +++ b/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace { +llvm::PassPluginLibraryInfo getExpandStaticAllocationPluginInfo() +{ + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "expand-static-allocation") + { + fpm.addPass(ExpandStaticAllocationPass()); + return true; + } + + return false; + }); + }}; +} +} // namespace + +// Interface for loading the plugin +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return getExpandStaticAllocationPluginInfo(); +} diff --git a/src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md b/src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md new file mode 100644 index 0000000000..5095eea8b3 --- /dev/null +++ b/src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md @@ -0,0 +1 @@ +# {ExpandStaticAllocation} Specification From 1866d17434397dbc901c8b180b46496d177ffde1 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 28 Jul 2021 16:47:26 +0200 Subject: [PATCH 045/106] Adding expansion pass to allow allocating arrays statically --- .../ExpandStaticAllocation.cpp | 386 +++++++++--------- .../ExpandStaticAllocation.hpp | 77 ++-- .../LibExpandStaticAllocation.cpp | 46 ++- 3 files changed, 261 insertions(+), 248 deletions(-) diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp index 154c4f9617..5684864d7a 100644 --- a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp +++ b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -1,223 +1,227 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" - #include "Llvm.hpp" +#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" + #include #include -namespace microsoft { -namespace quantum { -llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) +namespace microsoft { - // Pass body - for (auto &basic_block : function) - { - // Keeping track of instructions to remove in each block - std::vector to_remove; - - for (auto &instruction : basic_block) +namespace quantum +{ + llvm::PreservedAnalyses ExpandStaticAllocationPass::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& fam) { - // Finding calls - auto *call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - continue; - } - - ConstantArguments argument_constants{}; - std::vector remaining_arguments{}; - - auto callee_function = call_instr->getCalledFunction(); - auto &depenency_graph = fam.getResult(*callee_function); - - if (depenency_graph.size() > 0) - { - uint32_t idx = 0; - uint32_t N = static_cast(callee_function->arg_size()); - - // Finding argument constants - while (idx < N) + // Pass body + for (auto& basic_block : function) { - auto arg = 
callee_function->getArg(idx); - auto value = call_instr->getArgOperand(idx); - - auto cst = llvm::dyn_cast(value); - if (cst != nullptr) - { - argument_constants[arg->getName().str()] = cst; - } - else - { - remaining_arguments.push_back(idx); - } - - ++idx; - } - - // Checking which arrays are constant for this - auto new_callee = ExpandFunctionCall(depenency_graph, *callee_function, argument_constants); + // Keeping track of instructions to remove in each block + std::vector to_remove; - // Replacing call if a new function was created - if (new_callee != nullptr) - { - llvm::IRBuilder<> builder(call_instr); - (void)call_instr; - - // List with new call arguments - std::vector new_arguments; - for (auto const &i : remaining_arguments) - { - // Getting the i'th argument - llvm::Value *arg = call_instr->getArgOperand(i); - - // Adding arguments that were not constant - if (argument_constants.find(arg->getName().str()) == argument_constants.end()) + for (auto& instruction : basic_block) { - new_arguments.push_back(arg); + // Finding calls + auto* call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + continue; + } + + ConstantArguments argument_constants{}; + std::vector remaining_arguments{}; + + auto callee_function = call_instr->getCalledFunction(); + auto& depenency_graph = fam.getResult(*callee_function); + + if (depenency_graph.size() > 0) + { + uint32_t idx = 0; + auto n = static_cast(callee_function->arg_size()); + + // Finding argument constants + while (idx < n) + { + auto arg = callee_function->getArg(idx); + auto value = call_instr->getArgOperand(idx); + + auto cst = llvm::dyn_cast(value); + if (cst != nullptr) + { + argument_constants[arg->getName().str()] = cst; + } + else + { + remaining_arguments.push_back(idx); + } + + ++idx; + } + + // Checking which arrays are constant for this + auto new_callee = expandFunctionCall(depenency_graph, *callee_function, argument_constants); + + // Replacing call if a new function was created + 
if (new_callee != nullptr) + { + llvm::IRBuilder<> builder(call_instr); + (void)call_instr; + + // List with new call arguments + std::vector new_arguments; + for (auto const& i : remaining_arguments) + { + // Getting the i'th argument + llvm::Value* arg = call_instr->getArgOperand(i); + + // Adding arguments that were not constant + if (argument_constants.find(arg->getName().str()) == argument_constants.end()) + { + new_arguments.push_back(arg); + } + } + + // Creating a new call + llvm::Value* new_call = builder.CreateCall(new_callee, new_arguments); + + // Replace all calls to old function with calls to new function + for (auto& use : call_instr->uses()) + { + llvm::User* user = use.getUser(); + user->setOperand(use.getOperandNo(), new_call); + } + + // Schedule original instruction for deletion + to_remove.push_back(&instruction); + } + } } - } - - // Creating a new call - llvm::Value *new_call = builder.CreateCall(new_callee, new_arguments); - // Replace all calls to old function with calls to new function - for (auto &use : call_instr->uses()) - { - llvm::User *user = use.getUser(); - user->setOperand(use.getOperandNo(), new_call); - } - - // Schedule original instruction for deletion - to_remove.push_back(&instruction); + // Removing instructions + for (auto& instruction : to_remove) + { + if (!instruction->use_empty()) + { + instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); + } + instruction->eraseFromParent(); + } } - } - } - // Removing instructions - for (auto &instruction : to_remove) - { - if (!instruction->use_empty()) - { - instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); - } - instruction->eraseFromParent(); + return llvm::PreservedAnalyses::none(); } - } - return llvm::PreservedAnalyses::none(); -} - -llvm::Function *ExpandStaticAllocationPass::ExpandFunctionCall( - QubitAllocationResult const &depenency_graph, llvm::Function &callee, - ConstantArguments const &const_args) -{ - bool 
should_replace_function = false; - if (!depenency_graph.empty()) - { - // Checking that any of all allocations in the function - // body becomes static from replacing constant function arguments - for (auto const &allocation : depenency_graph) - { - // Ignoring non-static allocations - if (!allocation.is_possibly_static) - { - continue; - } - - // Ignoring trivial allocations - if (allocation.depends_on.empty()) - { - continue; - } - - // Checking all dependencies are constant - bool all_const = true; - for (auto &name : allocation.depends_on) - { - all_const = all_const && (const_args.find(name) != const_args.end()); - } - - // In case that all dependencies are constant for this - // allocation, we should replace the function with one where - // the arguments are eliminated. - if (all_const) - { - should_replace_function = true; - } - } - } - - // Replacing function if needed - if (should_replace_function) - { - auto module = callee.getParent(); - auto & context = module->getContext(); - llvm::IRBuilder<> builder(context); - - // Copying the original function - llvm::ValueToValueMapTy remapper; - std::vector arg_types; - - // The user might be deleting arguments to the function by specifying them in - // the VMap. 
If so, we need to not add the arguments to the arg ty vector - // - for (auto const &arg : callee.args()) + llvm::Function* ExpandStaticAllocationPass::expandFunctionCall( + QubitAllocationResult const& depenency_graph, + llvm::Function& callee, + ConstantArguments const& const_args) { - // Skipping constant arguments + bool should_replace_function = false; + if (!depenency_graph.empty()) + { + // Checking that any of all allocations in the function + // body becomes static from replacing constant function arguments + for (auto const& allocation : depenency_graph) + { + // Ignoring non-static allocations + if (!allocation.is_possibly_static) + { + continue; + } + + // Ignoring trivial allocations + if (allocation.depends_on.empty()) + { + continue; + } + + // Checking all dependencies are constant + bool all_const = true; + for (auto& name : allocation.depends_on) + { + all_const = all_const && (const_args.find(name) != const_args.end()); + } + + // In case that all dependencies are constant for this + // allocation, we should replace the function with one where + // the arguments are eliminated. + if (all_const) + { + should_replace_function = true; + } + } + } - if (const_args.find(arg.getName().str()) != const_args.end()) - { - continue; - } + // Replacing function if needed + if (should_replace_function) + { + auto module = callee.getParent(); + auto& context = module->getContext(); + llvm::IRBuilder<> builder(context); + + // Copying the original function + llvm::ValueToValueMapTy remapper; + std::vector arg_types; + + // The user might be deleting arguments to the function by specifying them in + // the VMap. 
If so, we need to not add the arguments to the arg ty vector + // + for (auto const& arg : callee.args()) + { + // Skipping constant arguments - arg_types.push_back(arg.getType()); - } + if (const_args.find(arg.getName().str()) != const_args.end()) + { + continue; + } - // Creating a new function - llvm::FunctionType *function_type = llvm::FunctionType::get( - callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); - auto function = llvm::Function::Create(function_type, callee.getLinkage(), - callee.getAddressSpace(), callee.getName(), module); + arg_types.push_back(arg.getType()); + } - // Copying the non-const arguments - auto dest_args_it = function->arg_begin(); + // Creating a new function + llvm::FunctionType* function_type = llvm::FunctionType::get( + callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); + auto function = llvm::Function::Create( + function_type, callee.getLinkage(), callee.getAddressSpace(), callee.getName(), module); - for (auto const &arg : callee.args()) - { - auto const_it = const_args.find(arg.getName().str()); - if (const_it == const_args.end()) - { - // Mapping remaining function arguments - dest_args_it->setName(arg.getName()); - remapper[&arg] = &*dest_args_it++; - } - else - { - remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); - } - } + // Copying the non-const arguments + auto dest_args_it = function->arg_begin(); - llvm::SmallVector returns; // Ignore returns cloned. 
+ for (auto const& arg : callee.args()) + { + auto const_it = const_args.find(arg.getName().str()); + if (const_it == const_args.end()) + { + // Mapping remaining function arguments + dest_args_it->setName(arg.getName()); + remapper[&arg] = &*dest_args_it++; + } + else + { + remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); + } + } - // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' - llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); + llvm::SmallVector returns; // Ignore returns cloned. - verifyFunction(*function); + // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' + llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); - return function; - } + verifyFunction(*function); - return nullptr; -} + return function; + } -bool ExpandStaticAllocationPass::isRequired() -{ - return true; -} + return nullptr; + } + + bool ExpandStaticAllocationPass::isRequired() + { + return true; + } -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp index 8166737e27..fbee619be2 100644 --- a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp +++ b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -3,44 +3,49 @@ // Licensed under the MIT License. 
#include "Llvm.hpp" + #include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include -namespace microsoft { -namespace quantum { - -class ExpandStaticAllocationPass : public llvm::PassInfoMixin +namespace microsoft { -public: - using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; - using ConstantArguments = std::unordered_map; - - /// Constructors and destructors - /// @{ - ExpandStaticAllocationPass() = default; - ExpandStaticAllocationPass(ExpandStaticAllocationPass const &) = default; - ExpandStaticAllocationPass(ExpandStaticAllocationPass &&) = default; - ~ExpandStaticAllocationPass() = default; - /// @} - - /// Operators - /// @{ - ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass const &) = default; - ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass &&) = default; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} - - /// @{ - llvm::Function *ExpandFunctionCall(QubitAllocationResult const &depenency_graph, - llvm::Function &callee, ConstantArguments const &const_args); - /// @} -}; - -} // namespace quantum -} // namespace microsoft +namespace quantum +{ + + class ExpandStaticAllocationPass : public llvm::PassInfoMixin + { + public: + using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; + using ConstantArguments = std::unordered_map; + + /// Constructors and destructors + /// @{ + ExpandStaticAllocationPass() = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass const&) = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass&&) = default; + ~ExpandStaticAllocationPass() = default; + /// @} + + /// Operators + /// @{ + ExpandStaticAllocationPass& operator=(ExpandStaticAllocationPass const&) = default; + ExpandStaticAllocationPass& operator=(ExpandStaticAllocationPass&&) = default; + /// @} + + /// Functions required by LLVM + /// 
@{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + + /// @{ + llvm::Function* expandFunctionCall( + QubitAllocationResult const& depenency_graph, + llvm::Function& callee, + ConstantArguments const& const_args); + /// @} + }; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp b/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp index 2475aaf8cd..e73a64b7d8 100644 --- a/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp +++ b/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp @@ -1,38 +1,42 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" - #include "Llvm.hpp" +#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" + #include #include -namespace { +namespace +{ llvm::PassPluginLibraryInfo getExpandStaticAllocationPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the pass - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "expand-static-allocation") - { - fpm.addPass(ExpandStaticAllocationPass()); - return true; - } + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, + [](PassBuilder& pb) + { + // Registering the pass + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) + { + if (name == "expand-static-allocation") + { + fpm.addPass(ExpandStaticAllocationPass()); + return true; + } - return false; - }); - }}; -} + return false; + }); + }}; +} } // namespace // Interface for loading the 
plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getExpandStaticAllocationPluginInfo(); + return getExpandStaticAllocationPluginInfo(); } From 76a534bf438f416b4e8e330bb36c423b440b5199 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Fri, 30 Jul 2021 07:10:59 +0200 Subject: [PATCH 046/106] Removing leading % from command line snippets --- src/Passes/CONTRIBUTING.md | 2 +- src/Passes/README.md | 18 +- src/Passes/docs/continous-integration.md | 10 +- src/Passes/docs/library-structure.md | 2 +- .../examples/OptimisationUsingOpt/README.md | 6 +- .../QubitAllocationAnalysis/README.md | 26 +- .../analysis-example.ll | 438 ++++++++++++++++++ 7 files changed, 470 insertions(+), 32 deletions(-) create mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll diff --git a/src/Passes/CONTRIBUTING.md b/src/Passes/CONTRIBUTING.md index 0b4493bb8d..f44a11a6c5 100644 --- a/src/Passes/CONTRIBUTING.md +++ b/src/Passes/CONTRIBUTING.md @@ -59,7 +59,7 @@ Prefer `#pragma once` over `#ifdef` protection. ## Code TODOs must contain owner name or Github issue ```sh -% ./manage runci +./manage runci (...) QsPasses/src/OpsCounter/OpsCounter.cpp:39:21: error: missing username/bug in TODO [google-readability-todo,-warnings-as-errors] // TODO: Fails to load if this is present diff --git a/src/Passes/README.md b/src/Passes/README.md index 6419c757b0..c1b34e91a5 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -204,7 +204,7 @@ For a gentle introduction, see examples. To make it easy to create a new pass, we have created a few templates to get you started quickly: ```sh -% ./manage create-pass HelloWorld +./manage create-pass HelloWorld Available templates: 1. Function Pass @@ -215,9 +215,9 @@ Select a template:1 At the moment you only have one choice which is a function pass. Over time we will add additional templates. 
Once you have instantiated your template, you are ready to build it: ```sh -% mkdir Debug -% cd Debug -% cmake .. +mkdir Debug +cd Debug +cmake .. -- The C compiler identification is AppleClang 12.0.5.12050022 -- The CXX compiler identification is AppleClang 12.0.5.12050022 (...) @@ -225,7 +225,7 @@ At the moment you only have one choice which is a function pass. Over time we wi -- Generating done -- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug -% make +make [ 25%] Building CXX object libs/CMakeFiles/OpsCounter.dir/OpsCounter/OpsCounter.cpp.o [ 50%] Linking CXX shared library libOpsCounter.dylib @@ -240,9 +240,9 @@ template will not do much except for print the function names of your code. To t build an IR and run the pass: ```sh -% cd ../examples/ClassicalIrCommandline -% make -% opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll +cd ../examples/ClassicalIrCommandline +make +opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll ``` If everything worked, you should see output like this: @@ -303,7 +303,7 @@ that you use a docker image to perform these steps. TODO(TFR): The docker image One error that you may encounter is that an analysis pass does not load with output similar to this: ```sh -% opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc +opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc Failed to load passes from '../../Debug/libQSharpPasses.dylib'. Request ignored. 
opt: unknown pass name 'operation-counter' ``` diff --git a/src/Passes/docs/continous-integration.md b/src/Passes/docs/continous-integration.md index 30cc273ef5..364d230883 100644 --- a/src/Passes/docs/continous-integration.md +++ b/src/Passes/docs/continous-integration.md @@ -3,7 +3,7 @@ In order to run the tests, you first need to build the library. Assuming that this is already done and the corresponding build is in `Debug/`, run the tests from the `Debug` folder: ``` -% lit tests/ -v +lit tests/ -v -- Testing: 2 tests, 2 workers -- PASS: Quantum-Passes :: QubitAllocationAnalysis/case1.ll (1 of 2) PASS: Quantum-Passes :: QubitAllocationAnalysis/case2.ll (2 of 2) @@ -49,13 +49,13 @@ This part defines pipelines for `.hpp` files and `.cpp` files allowing the devel Each of these CI stages can executed individually using `./manage` or you can run the entire CI process by invoking `./manage runci`. An example of what this may look like is here: ```zsh -% ./manage runci +./manage runci 2021-07-21 14:38:04,896 - FormatChecker - ERROR - /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp was not correctly formatted. 2021-07-21 14:38:04,899 - FormatChecker - ERROR - Your code did not pass formatting. -% ./manage stylecheck --fix-issues -% ./manage runci +./manage stylecheck --fix-issues +./manage runci -- Found LLVM 11.1.0 -- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm @@ -78,7 +78,7 @@ Use -header-filter=.* to display errors from all non-system headers. 
Use -system 2021-07-21 14:38:40,191 - Linter - ERROR - /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp failed static analysis # ISSUES FIXED MANUALLY -% ./manage runci +./manage runci -- Found LLVM 11.1.0 -- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm diff --git a/src/Passes/docs/library-structure.md b/src/Passes/docs/library-structure.md index d1977cb0e1..ae0c42238b 100644 --- a/src/Passes/docs/library-structure.md +++ b/src/Passes/docs/library-structure.md @@ -13,7 +13,7 @@ libs Adding a new pass is easy using the `manage` tool developed in this PR: ``` -% ./manage create-pass HelloWorld +./manage create-pass HelloWorld Available templates: 1. Function Pass diff --git a/src/Passes/examples/OptimisationUsingOpt/README.md b/src/Passes/examples/OptimisationUsingOpt/README.md index 7f84f1b2d5..03cd9db246 100644 --- a/src/Passes/examples/OptimisationUsingOpt/README.md +++ b/src/Passes/examples/OptimisationUsingOpt/README.md @@ -20,10 +20,10 @@ namespace Example { You find the code for this in the folder `SimpleExample`. To generate a QIR for this code, go to the folder and run ```sh -% cd SimpleExample/ -% dotnet clean SimpleExample.csproj +cd SimpleExample/ +dotnet clean SimpleExample.csproj (...) -% dotnet build SimpleExample.csproj -c Debug +dotnet build SimpleExample.csproj -c Debug ``` If everything went well, you should now have a subdirectory called `qir` and inside `qir`, you will find `SimpleExample.ll`. 
Depending on the version of Q#, diff --git a/src/Passes/examples/QubitAllocationAnalysis/README.md b/src/Passes/examples/QubitAllocationAnalysis/README.md index 383ee026ed..515b641ba4 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/README.md +++ b/src/Passes/examples/QubitAllocationAnalysis/README.md @@ -11,7 +11,7 @@ The following depnds on: Running following command ```sh -% make run +make run ``` will first build the pass, then build the QIR using Q# following by removing the noise using `opt` with optimisation level 1. Finally, it will execute the analysis pass and should provide you with information about qubit allocation in the Q# program defined in `ConstSizeArray/ConstSizeArray.qs`. @@ -21,35 +21,35 @@ will first build the pass, then build the QIR using Q# following by removing the From the Passes root (two levels up from this directory), make a new build ```sh -% mkdir Debug -% cd Debug -% cmake .. +mkdir Debug +cd Debug +cmake .. ``` and then compile the `QubitAllocationAnalysis`: ```sh -% make QubitAllocationAnalysis +make QubitAllocationAnalysis ``` Next return `examples/QubitAllocationAnalysis` and enter the directory `ConstSizeArray` to build the QIR: ```sh -% make analysis-example.ll +make analysis-example.ll ``` or execute the commands manually, ```sh -% dotnet build ConstSizeArray.csproj -% opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll -% make clean +dotnet build ConstSizeArray.csproj +opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll +make clean ``` Returning to `examples/QubitAllocationAnalysis`, the pass can now be ran by executing: ```sh -% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll ``` ## Example cases @@ -92,7 +92,7 @@ entry: Running the pass procudes following output: ``` -% opt -load-pass-plugin 
../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== @@ -152,7 +152,7 @@ entry: The analyser returns following output: ``` -% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== @@ -196,7 +196,7 @@ namespace Example { We will omit the QIR in the documenation as it is a long. The output of the anaysis is: ``` -% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll new file mode 100644 index 0000000000..6f6c98c8e0 --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -0,0 +1,438 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Tuple = type opaque +%Qubit = type opaque +%Array = type opaque +%Result = type opaque +%Callable = type opaque +%String = type opaque + +@Microsoft__Quantum__Qir__Emission__M = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Qir__Emission__M__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@0 = internal constant [3 x i8] c", \00" +@1 = internal constant [2 x i8] c"[\00" 
+@2 = internal constant [2 x i8] c"]\00" + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) local_unnamed_addr + +declare void @__quantum__qis__cnot__adj(%Qubit*, %Qubit*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare %Tuple* @__quantum__rt__tuple_create(i64) local_unnamed_addr + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) local_unnamed_addr + +define internal fastcc %Result* @Microsoft__Quantum__Qir__Emission__M__body(%Qubit* %q) unnamed_addr { +entry: + %0 = call %Result* @__quantum__qis__m__body(%Qubit* %q) + ret %Result* %0 +} + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %a, %Qubit* %b, %Qubit* %c) unnamed_addr { +entry: + call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %b) + call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %a) + call void @__quantum__qis__toffoli__body(%Qubit* %a, %Qubit* %b, %Qubit* %c) + ret void +} + +declare void @__quantum__qis__toffoli__body(%Qubit*, %Qubit*, %Qubit*) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %a, %Qubit* %b, %Qubit* %c) unnamed_addr { +entry: + call void @__quantum__qis__toffoli__adj(%Qubit* %a, %Qubit* %b, %Qubit* %c) + call void @__quantum__qis__cnot__adj(%Qubit* %c, %Qubit* %a) + call void @__quantum__qis__cnot__adj(%Qubit* %c, %Qubit* %b) + ret void +} + +declare void @__quantum__qis__toffoli__adj(%Qubit*, %Qubit*, %Qubit*) local_unnamed_addr + +define internal fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() unnamed_addr { +entry: + %a = call %Array* @__quantum__rt__qubit_allocate_array(i64 4) + call void @__quantum__rt__array_update_alias_count(%Array* %a, i32 1) + %b = call %Array* @__quantum__rt__qubit_allocate_array(i64 4) + call void @__quantum__rt__array_update_alias_count(%Array* 
%b, i32 1) + %cin = call %Qubit* @__quantum__rt__qubit_allocate() + %cout = call %Qubit* @__quantum__rt__qubit_allocate() + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %q = load %Qubit*, %Qubit** %1, align 8 + call void @__quantum__qis__x__body(%Qubit* %q) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %b) + %3 = add i64 %2, -1 + %.not1 = icmp slt i64 %3, 0 + br i1 %.not1, label %exit__1, label %body__1 + +body__1: ; preds = %entry, %body__1 + %4 = phi i64 [ %7, %body__1 ], [ 0, %entry ] + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 %4) + %6 = bitcast i8* %5 to %Qubit** + %q__1 = load %Qubit*, %Qubit** %6, align 8 + call void @__quantum__qis__x__body(%Qubit* %q__1) + %7 = add i64 %4, 1 + %.not = icmp sgt i64 %7, %3 + br i1 %.not, label %exit__1, label %body__1 + +exit__1: ; preds = %body__1, %entry + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %cin, %Qubit* %10, %Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %16 = load %Qubit*, %Qubit** %15, align 8 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 1) + %18 = bitcast i8* %17 to %Qubit** + %19 = load %Qubit*, %Qubit** %18, align 8 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %16, %Qubit* %19, %Qubit* %22) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) + %24 = 
bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 2) + %27 = bitcast i8* %26 to %Qubit** + %28 = load %Qubit*, %Qubit** %27, align 8 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) + %30 = bitcast i8* %29 to %Qubit** + %31 = load %Qubit*, %Qubit** %30, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %25, %Qubit* %28, %Qubit* %31) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33, align 8 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 3) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36, align 8 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) + %39 = bitcast i8* %38 to %Qubit** + %40 = load %Qubit*, %Qubit** %39, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %34, %Qubit* %37, %Qubit* %40) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) + %42 = bitcast i8* %41 to %Qubit** + %c = load %Qubit*, %Qubit** %42, align 8 + call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %cout) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) + %44 = bitcast i8* %43 to %Qubit** + %45 = load %Qubit*, %Qubit** %44, align 8 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 3) + %47 = bitcast i8* %46 to %Qubit** + %48 = load %Qubit*, %Qubit** %47, align 8 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) + %50 = bitcast i8* %49 to %Qubit** + %51 = load %Qubit*, %Qubit** %50, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %45, %Qubit* %48, %Qubit* %51) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) + %53 = bitcast i8* %52 to %Qubit** + %54 = 
load %Qubit*, %Qubit** %53, align 8 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 2) + %56 = bitcast i8* %55 to %Qubit** + %57 = load %Qubit*, %Qubit** %56, align 8 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) + %59 = bitcast i8* %58 to %Qubit** + %60 = load %Qubit*, %Qubit** %59, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %54, %Qubit* %57, %Qubit* %60) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %62 = bitcast i8* %61 to %Qubit** + %63 = load %Qubit*, %Qubit** %62, align 8 + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 1) + %65 = bitcast i8* %64 to %Qubit** + %66 = load %Qubit*, %Qubit** %65, align 8 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) + %68 = bitcast i8* %67 to %Qubit** + %69 = load %Qubit*, %Qubit** %68, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %63, %Qubit* %66, %Qubit* %69) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 0) + %71 = bitcast i8* %70 to %Qubit** + %72 = load %Qubit*, %Qubit** %71, align 8 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %74 = bitcast i8* %73 to %Qubit** + %75 = load %Qubit*, %Qubit** %74, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %cin, %Qubit* %72, %Qubit* %75) + %76 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* nonnull @Microsoft__Quantum__Qir__Emission__M, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %77 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission___73da7dcac81a47ddabb1a0e30be3dfdb_ForEach__body(%Callable* %76, %Array* %b) + call void @__quantum__rt__array_update_alias_count(%Array* %b, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %a, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %76, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %76, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %cin) + call void @__quantum__rt__qubit_release(%Qubit* %cout) + call void @__quantum__rt__qubit_release_array(%Array* %b) + call void @__quantum__rt__qubit_release_array(%Array* %a) + ret %Array* %77 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr + +declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) local_unnamed_addr + +define internal fastcc %Array* @Microsoft__Quantum__Qir__Emission___73da7dcac81a47ddabb1a0e30be3dfdb_ForEach__body(%Callable* %action, %Array* %array) unnamed_addr { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 1) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %2 = add i64 %1, -1 + %.not9 = icmp slt i64 %2, 0 + br i1 %.not9, label %exit__1, label %body__1 + +body__1: ; preds = %entry, %exit__4 + %3 = phi i64 [ %32, %exit__4 ], [ 0, %entry ] + %res.010 = phi %Array* [ %14, %exit__4 ], [ %0, %entry ] + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3) + %5 = bitcast i8* %4 to 
%Qubit** + %item = load %Qubit*, %Qubit** %5, align 8 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Result** + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 8) + %10 = bitcast %Tuple* %9 to %Qubit** + store %Qubit* %item, %Qubit** %10, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 8) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %9, %Tuple* %11) + %12 = bitcast %Tuple* %11 to %Result** + %13 = load %Result*, %Result** %12, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + store %Result* %13, %Result** %8, align 8 + %14 = call %Array* @__quantum__rt__array_concatenate(%Array* %res.010, %Array* %6) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %16 = add i64 %15, -1 + %.not57 = icmp slt i64 %16, 0 + br i1 %.not57, label %exit__2, label %body__2 + +exit__1: ; preds = %exit__4, %entry + %res.0.lcssa = phi %Array* [ %0, %entry ], [ %14, %exit__4 ] + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %res.0.lcssa, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret %Array* %res.0.lcssa + +body__2: ; preds = %body__1, %body__2 + %17 = phi i64 [ %21, %body__2 ], [ 0, %body__1 ] + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %17) + %19 = bitcast i8* %18 to %Result** + %20 = load %Result*, %Result** %19, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %20, i32 1) + %21 = add i64 %17, 1 + %.not5 = icmp sgt i64 %21, %16 + br i1 %.not5, 
label %exit__2, label %body__2 + +exit__2: ; preds = %body__2, %body__1 + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %res.010, i32 -1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %23 = bitcast i8* %22 to %Result** + %24 = load %Result*, %Result** %23, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %res.010) + %26 = add i64 %25, -1 + %.not68 = icmp slt i64 %26, 0 + br i1 %.not68, label %exit__4, label %body__4 + +body__4: ; preds = %exit__2, %body__4 + %27 = phi i64 [ %31, %body__4 ], [ 0, %exit__2 ] + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %res.010, i64 %27) + %29 = bitcast i8* %28 to %Result** + %30 = load %Result*, %Result** %29, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %30, i32 -1) + %31 = add i64 %27, 1 + %.not6 = icmp sgt i64 %31, %26 + br i1 %.not6, label %exit__4, label %body__4 + +exit__4: ; preds = %body__4, %exit__2 + call void @__quantum__rt__array_update_reference_count(%Array* %res.010, i32 -1) + %32 = add i64 %3, 1 + %.not = icmp sgt i64 %32, %2 + br i1 %.not, label %exit__1, label %body__1 +} + +define internal void @Microsoft__Quantum__Qir__Emission__M__body__wrapper(%Tuple* nocapture readnone %capture-tuple, %Tuple* nocapture readonly %arg-tuple, %Tuple* nocapture %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to %Qubit** + %1 = load %Qubit*, %Qubit** %0, align 8 + %2 = call fastcc %Result* @Microsoft__Quantum__Qir__Emission__M__body(%Qubit* %1) + %3 = bitcast %Tuple* %result-tuple to %Result** + store %Result* %2, %Result** %3, 
align 8 + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) local_unnamed_addr + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) local_unnamed_addr + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) local_unnamed_addr + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) local_unnamed_addr + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) local_unnamed_addr + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) local_unnamed_addr + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) local_unnamed_addr + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) local_unnamed_addr + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define { i64, i8* }* @Microsoft__Quantum__Qir__Emission__RunAdder__Interop() local_unnamed_addr #0 { +entry: + %0 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = call i8* @__quantum__rt__memory_allocate(i64 %1) + %3 = ptrtoint i8* %2 to i64 + %4 = add i64 %1, -1 + %.not5 = icmp slt i64 %4, 0 + br i1 %.not5, label %exit__1, label %body__1 + +body__1: ; preds = %entry, %body__1 + %5 = phi i64 [ %14, %body__1 ], [ 0, %entry ] + %6 = add i64 %5, %3 + %7 = inttoptr i64 %6 to i8* + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) + %9 = bitcast i8* %8 to %Result** + %10 = load %Result*, %Result** %9, align 8 + %11 = call %Result* @__quantum__rt__result_get_zero() + %12 = call i1 @__quantum__rt__result_equal(%Result* %10, %Result* %11) + %not. 
= xor i1 %12, true + %13 = sext i1 %not. to i8 + store i8 %13, i8* %7, align 1 + %14 = add i64 %5, 1 + %.not = icmp sgt i64 %14, %4 + br i1 %.not, label %exit__1, label %body__1 + +exit__1: ; preds = %body__1, %entry + %15 = call i8* @__quantum__rt__memory_allocate(i64 16) + %16 = bitcast i8* %15 to i64* + store i64 %1, i64* %16, align 4 + %17 = getelementptr i8, i8* %15, i64 8 + %18 = bitcast i8* %17 to i8** + store i8* %2, i8** %18, align 8 + %.not34 = icmp slt i64 %4, 0 + br i1 %.not34, label %exit__2, label %body__2 + +body__2: ; preds = %exit__1, %body__2 + %19 = phi i64 [ %23, %body__2 ], [ 0, %exit__1 ] + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %19) + %21 = bitcast i8* %20 to %Result** + %22 = load %Result*, %Result** %21, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %22, i32 -1) + %23 = add i64 %19, 1 + %.not3 = icmp sgt i64 %23, %4 + br i1 %.not3, label %exit__2, label %body__2 + +exit__2: ; preds = %body__2, %exit__1 + %24 = bitcast i8* %15 to { i64, i8* }* + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret { i64, i8* }* %24 +} + +declare i8* @__quantum__rt__memory_allocate(i64) local_unnamed_addr + +declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr + +define void @Microsoft__Quantum__Qir__Emission__RunAdder() local_unnamed_addr #1 { +entry: + %0 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @1, i64 0, i64 0)) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %4 = add i64 %3, -1 + %.not7 = icmp slt i64 %4, 0 + br i1 %.not7, label 
%exit__1, label %body__1 + +body__1: ; preds = %entry, %condContinue__1 + %5 = phi i64 [ %14, %condContinue__1 ], [ 0, %entry ] + %6 = phi %String* [ %13, %condContinue__1 ], [ %2, %entry ] + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) + %8 = bitcast i8* %7 to %Result** + %9 = load %Result*, %Result** %8, align 8 + %.not5 = icmp eq %String* %6, %2 + br i1 %.not5, label %condContinue__1, label %condTrue__1 + +condTrue__1: ; preds = %body__1 + %10 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %1) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %body__1 + %11 = phi %String* [ %10, %condTrue__1 ], [ %6, %body__1 ] + %12 = call %String* @__quantum__rt__result_to_string(%Result* %9) + %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + %14 = add i64 %5, 1 + %.not = icmp sgt i64 %14, %4 + br i1 %.not, label %exit__1, label %body__1 + +exit__1: ; preds = %condContinue__1, %entry + %.lcssa = phi %String* [ %2, %entry ], [ %13, %condContinue__1 ] + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i64 0, i64 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %.lcssa, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %.lcssa, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__message(%String* %16) + %.not46 = icmp slt i64 %4, 0 + br i1 %.not46, label %exit__2, label %body__2 + +body__2: ; preds = %exit__1, 
%body__2 + %17 = phi i64 [ %21, %body__2 ], [ 0, %exit__1 ] + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %17) + %19 = bitcast i8* %18 to %Result** + %20 = load %Result*, %Result** %19, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %20, i32 -1) + %21 = add i64 %17, 1 + %.not4 = icmp sgt i64 %21, %4 + br i1 %.not4, label %exit__2, label %body__2 + +exit__2: ; preds = %body__2, %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) local_unnamed_addr + +declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } From ae42ad2d32e7968bf2c964547c952c3c9f0fa156 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 30 Jul 2021 07:44:23 +0200 Subject: [PATCH 047/106] Fixing broken things from merge --- src/Passes/.clang-tidy.orig | 125 ------ src/Passes/CMakeLists.txt.orig | 52 --- src/Passes/CONTRIBUTING.md.orig | 97 ----- src/Passes/Makefile.orig | 10 - src/Passes/README.md.orig | 367 ------------------ src/Passes/docs/index.md.orig | 7 - .../ClassicalIrCommandline/Makefile.orig | 26 -- .../ClassicalIrCommandline/README.md.orig | 60 --- .../classical-program.c.orig | 21 - .../OptimisationUsingOpt/README.md.orig | 70 ---- .../SimpleExample/Makefile.orig | 17 - src/Passes/include/Llvm.hpp | 2 +- src/Passes/include/Llvm.hpp.orig | 62 --- src/Passes/libs/CMakeLists.txt.orig | 49 --- src/Passes/requirements.txt.orig | 5 - .../site-packages/TasksCI/builder.py.orig | 139 ------- src/Passes/site-packages/TasksCI/cli.py.orig | 282 -------------- .../site-packages/TasksCI/formatting.py.orig | 222 ----------- .../site-packages/TasksCI/linting.py.orig | 158 -------- .../site-packages/TasksCI/settings.py.orig | 21 - .../site-packages/TasksCI/toolchain.py.orig | 48 --- 21 files changed, 1 insertion(+), 1839 deletions(-) delete mode 100644 src/Passes/.clang-tidy.orig delete mode 100644 src/Passes/CMakeLists.txt.orig delete mode 100644 src/Passes/CONTRIBUTING.md.orig delete mode 100644 src/Passes/Makefile.orig delete mode 100644 src/Passes/README.md.orig delete mode 100644 src/Passes/docs/index.md.orig delete mode 100644 src/Passes/examples/ClassicalIrCommandline/Makefile.orig delete mode 100644 src/Passes/examples/ClassicalIrCommandline/README.md.orig delete mode 100644 src/Passes/examples/ClassicalIrCommandline/classical-program.c.orig delete mode 100644 src/Passes/examples/OptimisationUsingOpt/README.md.orig delete mode 100644 src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile.orig delete mode 100644 src/Passes/include/Llvm.hpp.orig delete mode 100644 src/Passes/libs/CMakeLists.txt.orig delete mode 100644 src/Passes/requirements.txt.orig delete mode 
100644 src/Passes/site-packages/TasksCI/builder.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/cli.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/formatting.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/linting.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/settings.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/toolchain.py.orig diff --git a/src/Passes/.clang-tidy.orig b/src/Passes/.clang-tidy.orig deleted file mode 100644 index 277cf2b186..0000000000 --- a/src/Passes/.clang-tidy.orig +++ /dev/null @@ -1,125 +0,0 @@ -Checks: "-*,bugprone-*,\ --readability-*,\ -readability-identifier-*,\ -readability-redundant-member-init,\ -readability-braces-around-statements,\ -cert-dcl*,\ -cert-env*,\ -cert-err52-cpp,\ -cert-err60-cpp,\ -cert-flp30-c,\ -clang-analyzer-security.FloatLoopCounter,\ -google-build-explicit-make-pair,\ -google-build-namespaces,\ -google-explicit-constructor,\ -google-readability-*,\ -google-runtime-operator,\ -hicpp-exception-baseclass,\ -hicpp-explicit-conversions,\ -hicpp-use-*,\ -modernize-avoid-bind,\ -modernize-loop-convert,\ -modernize-make-shared,\ -modernize-make-unique,\ -modernize-redundant-void-arg,\ -modernize-replace-random-shuffle,\ -modernize-shrink-to-fit,\ -modernize-use-bool-literals,\ -modernize-use-default-member-init,\ -modernize-use-emplace,\ -modernize-use-equals-default,\ -modernize-use-equals-delete,\ -modernize-use-noexcept,\ -modernize-use-nullptr,\ -modernize-use-override,\ -modernize-use-transparent-functors,\ -misc-*,\ --misc-misplaced-widening-cast,\ -performance-*" - -WarningsAsErrors: '*' -HeaderFilterRegex: '.*' - -CheckOptions: - # Configuration documentation: https://clang.llvm.org/extra/clang-tidy/checks/readability-identifier-naming.html - # Namespaces - - key: readability-identifier-naming.NamespaceCase - value: 'lower_case' - - # Classes and structs - - key: readability-identifier-naming.AbstractClassPrefix - value: 'I' - - key: 
readability-identifier-naming.ClassCase - value: 'CamelCase' - - key: readability-identifier-naming.StructCase - value: 'CamelCase' - - key: readability-identifier-naming.UnionCase - value: 'CamelCase' - - # Class members - - key: readability-identifier-naming.PrivateMemberCase - value: 'lower_case' - - key: readability-identifier-naming.PrivateMemberSuffix - value: '_' - - key: readability-identifier-naming.ProtectedMemberCase - value: 'lower_case' - - key: readability-identifier-naming.ProtectedMemberSuffix - value: '_' - -<<<<<<< HEAD - # Type Alias and Enum Types / constants -======= - # Alias ->>>>>>> features/llvm-passes - - key: readability-identifier-naming.TypeAliasCase - value: 'CamelCase' - - key: readability-identifier-naming.TypedefCase - value: 'CamelCase' -<<<<<<< HEAD - - key: readability-identifier-naming.EnumCase - value: 'CamelCase' - - key: readability-identifier-naming.EnumConstantCase - value: 'CamelCase' - - # Globals, consts and enums - - key: readability-identifier-naming.GlobalConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.GlobalConstantPrefix - value: 'G_' - - key: readability-identifier-naming.ConstantCase - value: 'UPPER_CASE' -======= ->>>>>>> features/llvm-passes - - # Functions - - key: readability-identifier-naming.FunctionCase - value: 'camelBack' - - key: readability-identifier-naming.IgnoreMainLikeFunctions - value: true - - # Variables and parameters - - key: readability-identifier-naming.VariableCase - value: 'lower_case' - - key: readability-identifier-naming.LocalVariableCase - value: 'lower_case' - - key: readability-identifier-naming.ParameterCase - value: 'lower_case' - -<<<<<<< HEAD -======= - # Globals, consts and enums - - key: readability-identifier-naming.GlobalConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.GlobalConstantPrefix - value: 'G_' - - key: readability-identifier-naming.ConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.EnumCase - 
value: 'CamelCase' - - key: readability-identifier-naming.EnumConstantCase - value: 'CamelCase' - ->>>>>>> features/llvm-passes - # Macros - - key: readability-identifier-naming.MacroDefinitionCase - value: 'UPPER_CASE' diff --git a/src/Passes/CMakeLists.txt.orig b/src/Passes/CMakeLists.txt.orig deleted file mode 100644 index cbe62ebbd8..0000000000 --- a/src/Passes/CMakeLists.txt.orig +++ /dev/null @@ -1,52 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -project(QSharpPasses) - -find_package(LLVM REQUIRED CONFIG) -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. -set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings -# are triggered if llvm-tutor is built without this flag (though otherwise it -# builds fine). For consistency, add it here too. 
-check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) -if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Adding the libraries -add_subdirectory(libs) -<<<<<<< HEAD -add_subdirectory(tests) -======= ->>>>>>> features/llvm-passes diff --git a/src/Passes/CONTRIBUTING.md.orig b/src/Passes/CONTRIBUTING.md.orig deleted file mode 100644 index 463e3f8d75..0000000000 --- a/src/Passes/CONTRIBUTING.md.orig +++ /dev/null @@ -1,97 +0,0 @@ -# Contributing (Proposal - WiP) - -This document is work in progress and nothing is set in stone. In case you do not want to feel like reading this style guide, just run - -```sh -./manage runci -``` - -<<<<<<< HEAD -from the `QsPasses` directory as all points defined in this document is automatically enforces. You can then refer to this guide for an explanation for why and how. -======= -from the `Passes` directory as all points defined in this document is automatically enforces. You can then refer to this guide for an explanation for why and how. ->>>>>>> features/llvm-passes - -## Why do we need a style guide? - -Consistency and readibility such that it is easy to read and understand code that was not written by yourself. 
For example, if one developer uses `CamelCase` for namespaces and `snake_case` for classes while another uses `snake_case` for namespaces and `CamelCase` you may end up with code sections that looks like this - -```cpp -int32_t main() -{ - name_space1::Class1 hello; - NameSpace2::class_name world; -} -``` - -which is hard to read. - -## What does the style guide apply to? - -<<<<<<< HEAD -The style guide applies to any new code written as well as code that is being refactored added to the `QsPasses` library. We do not rewrite existing code for the sake just changing the style. -======= -The style guide applies to any new code written as well as code that is being refactored added to the `Passes` library. We do not rewrite existing code for the sake just changing the style. ->>>>>>> features/llvm-passes - -## Style discrepency - -In case of a discrepency between this guideline and `clang-tidy` or `clang-format`, -clang tools rule. In case of discrency between this guide and any guides subsequently referenced guides, this guide rule. However, feel free to suggest changes. Changes will be incorporated on the basis -that updated styles are apply to new code and not existing code. - -## Naming - -Naming is taken from the [Microsoft AirSim](https://github.com/microsoft/AirSim/blob/master/docs/coding_guidelines.md) project. 
- -| **Code Element** | **Style** | **Comment** | -| --------------------- | -------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | -| Namespace | snake_case | Differentiates `namespace::ClassName` and `ClassName::SubClass` names | -| Class name | CamelCase | To differentiate from STL types which ISO recommends (do not use "C" or "T" prefixes) | -| Function name | camelCase | Lower case start is almost universal except for .Net world | -| Parameters/Locals | snake_case | Vast majority of standards recommends this because \_ is more readable to C++ crowd (although not much to Java/.Net crowd) | -| Member variables | snake_case_with\_ | The prefix \_ is heavily discouraged as ISO has rules around reserving \_identifiers, so we recommend suffix instead | -| Enums and its members | CamelCase | Most except very old standards agree with this one | -| Globals | g_snake_case | Avoid using globals whenever possible, but if you have to use `g_`. | -| Constants | UPPER_CASE | Very contentious and we just have to pick one here, unless if is a private constant in class or method, then use naming for Members or Locals | -| File names | Match case of class name in file | Lot of pro and cons either way but this removes inconsistency in auto generated code (important for ROS) | - -## Modernise when possible - -In general, modernise the code where possible. For instance, prefer `using` of `typedef`. - -## Header guards - -Prefer `#pragma once` over `#ifdef` protection. - -## Code TODOs must contain owner name or Github issue - -```sh -<<<<<<< HEAD -./manage runci -(...) -QsPasses/src/OpsCounter/OpsCounter.cpp:39:21: error: missing username/bug in TODO [google-readability-todo,-warnings-as-errors] -======= -% ./manage runci -(...) 
-Passes/src/OpsCounter/OpsCounter.cpp:39:21: error: missing username/bug in TODO [google-readability-todo,-warnings-as-errors] ->>>>>>> features/llvm-passes - // TODO: Fails to load if this is present - ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // TODO(tfr): Fails to load if this is present -``` - -## Always add copyrights - -Always add copyrights at the top of the file. - -```text -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. -``` - -For header files, prefer to put `#prama once` before the copyright. - -## Tabs vs. spaces - -Seriously, this should not even be a discussion: It does not matter. If you prefer one over the other feel free to write in whatever style you prefer as long as you use `clang-format` before making a PR. Again, the key here is consistency and readibility. diff --git a/src/Passes/Makefile.orig b/src/Passes/Makefile.orig deleted file mode 100644 index 746045c729..0000000000 --- a/src/Passes/Makefile.orig +++ /dev/null @@ -1,10 +0,0 @@ -nothing: - @echo "Preventing the user from accidently running the clean command." - -clean: - rm -rf Release/ - rm -rf Debug/ -<<<<<<< HEAD - -======= ->>>>>>> features/llvm-passes diff --git a/src/Passes/README.md.orig b/src/Passes/README.md.orig deleted file mode 100644 index fa21630a5e..0000000000 --- a/src/Passes/README.md.orig +++ /dev/null @@ -1,367 +0,0 @@ -<<<<<<< HEAD -# Q# Passes for LLVM - -This library defines [LLVM passes](https://llvm.org/docs/Passes.html) used for analysing, optimising and transforming the IR. The Q# pass library is a dynamic library that can be compiled and ran separately from the -======= -# QIR Passes for LLVM - -This library defines [LLVM passes](https://llvm.org/docs/Passes.html) used for analysing, optimising and transforming the IR. The QIR pass library is a dynamic library that can be compiled and ran separately from the ->>>>>>> features/llvm-passes -rest of the project code. 
While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the [QIR specification](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR). - -## What do LLVM passes do? - -Before getting started, we here provide a few examples of classical use cases for [LLVM passes](https://llvm.org/docs/Passes.html). You find additional [instructive examples here][1]. - -**Example 1: Transformation**. As a first example of what [LLVM passes](https://llvm.org/docs/Passes.html) can do, we look at optimisation. Consider a compiler which -compiles - -```c -double test(double x) { - return (1+2+x)*(x+(1+2)); -} -``` - -into following IR: - -``` -define double @test(double %x) { -entry: - %addtmp = fadd double 3.000000e+00, %x - %addtmp1 = fadd double %x, 3.000000e+00 - %multmp = fmul double %addtmp, %addtmp1 - ret double %multmp -} -``` - -This code is obviously inefficient as we could get rid of one operation by rewritting the code to: - -```c -double test(double x) { - double y = 3+x; - return y * y; -} -``` - -One purpose of [LLVM passes](https://llvm.org/docs/Passes.html) is to allow automatic transformation from the above IR to the IR: - -``` -define double @test(double %x) { -entry: - %addtmp = fadd double %x, 3.000000e+00 - %multmp = fmul double %addtmp, %addtmp - ret double %multmp -} -``` - -**Example 2: Analytics**. Another example of useful passes are those generating and collecting statistics about the program. For instance, one analytics program -makes sense for classical programs is to count instructions used to implement functions. 
Take the C program: - -```c -int foo(int x) -{ - return x; -} - -void bar(int x, int y) -{ - foo(x + y); -} - -int main() -{ - foo(2); - bar(3, 2); - - return 0; -} -``` - -which produces follow IR (without optimisation): - -```language -define dso_local i32 @foo(i32 %0) #0 { - %2 = alloca i32, align 4 - store i32 %0, i32* %2, align 4 - %3 = load i32, i32* %2, align 4 - ret i32 %3 -} - -define dso_local void @bar(i32 %0, i32 %1) #0 { - %3 = alloca i32, align 4 - %4 = alloca i32, align 4 - store i32 %0, i32* %3, align 4 - store i32 %1, i32* %4, align 4 - %5 = load i32, i32* %3, align 4 - %6 = load i32, i32* %4, align 4 - %7 = add nsw i32 %5, %6 - %8 = call i32 @foo(i32 %7) - ret void -} - -define dso_local i32 @main() #0 { - %1 = alloca i32, align 4 - store i32 0, i32* %1, align 4 - %2 = call i32 @foo(i32 2) - call void @bar(i32 3, i32 2) - ret i32 0 -} -``` - -A stat pass for this code, would collect following statisics: - -```text -Stats for 'foo' -=========================== -Opcode # Used ---------------------------- -load 1 -ret 1 -alloca 1 -store 1 ---------------------------- - -Stats for 'bar' -=========================== -Opcode # Used ---------------------------- -load 2 -add 1 -ret 1 -alloca 2 -store 2 -call 1 ---------------------------- - -Stats for 'main' -=========================== -Opcode # Used ---------------------------- -ret 1 -alloca 1 -store 1 -call 2 ---------------------------- -``` - -**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on [static arrays][2]. -Note that this is a non-standard usecase as such analysis is usually made using the AST rather than at the IR level. - -**References** - -- [1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass -- [2] https://github.com/victor-fdez/llvm-array-check-pass - -## Out-of-source Pass - -This library is build as set of out-of-source-passes. 
All this means is that we will not be downloading the LLVM repository and modifying this repository directly. You can read more [here](https://llvm.org/docs/CMake.html#cmake-out-of-source-pass). - -# Getting started - -## Dependencies - -This library is written in C++ and depends on: - -- LLVM - -Additional development dependencies include: - -- CMake -- clang-format -- clang-tidy - -## Building the passes - -To build the passes, create a new build directory and switch to that directory: - -```sh -mkdir Debug -cd Debug/ -``` - -To build the library, first configure CMake from the build directory - -```sh -cmake .. -``` - -and then make your target - -```sh -make [target] -``` - -<<<<<<< HEAD -The default target is `all`. Other valid targets are the name of the folders in `libs/` found in the passes root. -======= -Valid targets are the name of the folders in `libs/` found in the passes root. ->>>>>>> features/llvm-passes - -## Running a pass - -You can run a pass using [opt](https://llvm.org/docs/CommandGuide/opt.html) as follows: - -```sh -cd examples/ClassicalIrCommandline -make emit-llvm-bc -opt -load-pass-plugin ../../{Debug,Release}/libOpsCounter.{dylib,so} --passes="print" -disable-output classical-program.bc -``` - -<<<<<<< HEAD -For a gentle introduction, see examples. -======= -For a detailed tutorial, see examples. ->>>>>>> features/llvm-passes - -## Creating a new pass - -To make it easy to create a new pass, we have created a few templates to get you started quickly: - -```sh -<<<<<<< HEAD -./manage create-pass HelloWorld -======= -% ./manage create-pass HelloWorld ->>>>>>> features/llvm-passes -Available templates: - -1. Function Pass - -Select a template:1 -``` - -At the moment you only have one choice which is a function pass. Over time we will add additional templates. Once you have instantiated your template, you are ready to build it: - -```sh -<<<<<<< HEAD -mkdir Debug -cd Debug -cmake .. -======= -% mkdir Debug -% cd Debug -% cmake .. 
->>>>>>> features/llvm-passes --- The C compiler identification is AppleClang 12.0.5.12050022 --- The CXX compiler identification is AppleClang 12.0.5.12050022 -(...) --- Configuring done --- Generating done -<<<<<<< HEAD --- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug - -make -======= --- Build files have been written to: ./qsharp-compiler/src/Passes/Debug - -% make ->>>>>>> features/llvm-passes - -[ 25%] Building CXX object libs/CMakeFiles/OpsCounter.dir/OpsCounter/OpsCounter.cpp.o -[ 50%] Linking CXX shared library libOpsCounter.dylib -[ 50%] Built target OpsCounter -[ 75%] Building CXX object libs/CMakeFiles/HelloWorld.dir/HelloWorld/HelloWorld.cpp.o -[100%] Linking CXX shared library libHelloWorld.dylib -[100%] Built target HelloWorld -``` - -Your new pass is ready to be implemented. Open `libs/HelloWorld/HelloWorld.cpp` to implement the details of the pass. At the moment, the -template will not do much except for print the function names of your code. To test your new pass go to the directory `examples/ClassicalIrCommandline`, -build an IR and run the pass: - -```sh -<<<<<<< HEAD -cd ../examples/ClassicalIrCommandline -make -opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll -======= -% cd ../examples/ClassicalIrCommandline -% make -% opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll ->>>>>>> features/llvm-passes -``` - -If everything worked, you should see output like this: - -```sh -Implement your pass here: foo -Implement your pass here: bar -Implement your pass here: main -``` - -## CI - -Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, -unit test passes and that there are no erros found by the static analyser. 
- -To setup the CI environment, run following commands - -```sh -source develop.env -virtualenv develop__venv -source develop__venv/bin/activate -pip install -r requirements.txt -``` - -These adds the necessary environment variables to ensure that you have the `TasksCI` package and all required dependencies. - -To check the style, run - -```sh -./manage stylecheck -``` - -To test that the code compiles and tests passes run - -```sh -./manage test -``` - -Finally, to analyse the code, run - -```sh -./manage lint -``` - -You can run all processes by running: - -```sh -./manage runci -``` - -As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended -that you use a docker image to perform these steps. TODO(TFR): The docker image is not added yet and this will be documented in the future. - -# Developer FAQ - -## Pass does not load - -One error that you may encounter is that an analysis pass does not load with output similar to this: - -```sh -<<<<<<< HEAD -opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc -======= -% opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc ->>>>>>> features/llvm-passes -Failed to load passes from '../../Debug/libQSharpPasses.dylib'. Request ignored. -opt: unknown pass name 'operation-counter' -``` - -This is likely becuase you have forgotten to instantiate static class members. 
For instance, in the case of an instance of `llvm::AnalysisInfoMixin` you are required to have static member `Key`: - -```cpp -class COpsCounterPass : public llvm::AnalysisInfoMixin { -private: - static llvm::AnalysisKey Key; //< REQUIRED by llvm registration - friend struct llvm::AnalysisInfoMixin; -}; -``` - -If you forget to instantiate this variable in your corresponding `.cpp` file, - -```cpp -// llvm::AnalysisKey COpsCounterPass::Key; //< Uncomment this line to make everything work -``` - -everything will compile, but the pass will fail to load. There will be no linking errors either. diff --git a/src/Passes/docs/index.md.orig b/src/Passes/docs/index.md.orig deleted file mode 100644 index 2fe6b79b93..0000000000 --- a/src/Passes/docs/index.md.orig +++ /dev/null @@ -1,7 +0,0 @@ -<<<<<<< HEAD -# Q# pass documentation -======= -# QIR pass documentation ->>>>>>> features/llvm-passes - -This directory and file is a placeholder for describing LLVM passes which was already implemented. diff --git a/src/Passes/examples/ClassicalIrCommandline/Makefile.orig b/src/Passes/examples/ClassicalIrCommandline/Makefile.orig deleted file mode 100644 index c19b98c8ca..0000000000 --- a/src/Passes/examples/ClassicalIrCommandline/Makefile.orig +++ /dev/null @@ -1,26 +0,0 @@ -<<<<<<< HEAD -emit-llvm-cpp: - clang -O3 -S -std=c++17 -emit-llvm classical-program.cpp -o classical-program.ll - -======= ->>>>>>> features/llvm-passes -emit-llvm: - clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll - -emit-llvm-bc: - clang -O0 -c -emit-llvm classical-program.c -o classical-program.bc - - -debug-ng-pass-mac: emit-llvm-bc - opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -debug --passes="operation-counter" -disable-output classical-program.bc - - - -clean: - rm -f classical-program.ll -<<<<<<< HEAD - rm -f classical-program.bc -======= - rm -f classical-program.bc - ->>>>>>> features/llvm-passes diff --git a/src/Passes/examples/ClassicalIrCommandline/README.md.orig 
b/src/Passes/examples/ClassicalIrCommandline/README.md.orig deleted file mode 100644 index 4ee98bc634..0000000000 --- a/src/Passes/examples/ClassicalIrCommandline/README.md.orig +++ /dev/null @@ -1,60 +0,0 @@ -# Emitting classical IRs - -This example demonstrates how to emit a classical IR and run a custom -pass on it. The purpose of this example is to teach the user how to apply -a pass to a IR using commandline tools only. - -IRs can be represented either by a human readible language or through bytecode. For -C programs former is generated by - -```sh -<<<<<<< HEAD - clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll -``` - -where as the latter is generated writing: - -```sh - clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc -======= -% clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll -``` - -whereas the latter is generated by executing: - -```sh -% clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc ->>>>>>> features/llvm-passes -``` - -This generates a nice and short IR which makes not too overwhelming to understand what is going on. - -## Legacy passes - -<<<<<<< HEAD -This part assumes that you have build the QsPasses library. - -```sh -opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-counter -analyze classical-program.ll -======= -This part assumes that you have built the Passes library. - -```sh -% opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-counter -analyze classical-program.ll ->>>>>>> features/llvm-passes -``` - -## Next-gen passes - -<<<<<<< HEAD -This part assumes that you have build the QsPasses library. - -```sh -opt -load-pass-plugin ../../{Debug,Release}/libs/libQSharpPasses.{dylib,so} --passes="print" -disable-output classical-program.bc -======= -This part assumes that you have built the Passes library. 
- -```sh -% opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc ->>>>>>> features/llvm-passes -``` diff --git a/src/Passes/examples/ClassicalIrCommandline/classical-program.c.orig b/src/Passes/examples/ClassicalIrCommandline/classical-program.c.orig deleted file mode 100644 index 70e6777170..0000000000 --- a/src/Passes/examples/ClassicalIrCommandline/classical-program.c.orig +++ /dev/null @@ -1,21 +0,0 @@ -int foo(int x) -{ - return x; -} - -void bar(int x, int y) -{ - foo(x + y); -} - -int main() -{ - foo(2); - bar(3, 2); - - return 0; -<<<<<<< HEAD -} -======= -} ->>>>>>> features/llvm-passes diff --git a/src/Passes/examples/OptimisationUsingOpt/README.md.orig b/src/Passes/examples/OptimisationUsingOpt/README.md.orig deleted file mode 100644 index ea94699bf1..0000000000 --- a/src/Passes/examples/OptimisationUsingOpt/README.md.orig +++ /dev/null @@ -1,70 +0,0 @@ -# Optimisation Using Opt - -In this document, we give a brief introduction on how to perform IR optimisations -using `opt`. - -## Stripping dead code - -We start out by considering a simple case of a program that just returns 0: - -```qsharp -namespace Example { - @EntryPoint() - operation OurAwesomeQuantumProgram(nQubits : Int) : Int { - - return 0; - } -} -``` - -You find the code for this in the folder `SimpleExample`. To generate a QIR for this code, go to the folder and run - -```sh -<<<<<<< HEAD -cd SimpleExample/ -dotnet clean SimpleExample.csproj -(...) -dotnet build SimpleExample.csproj -c Debug -``` - -If everything went well, you should now have a subdirectory called `qir` and inside `qir`, you will find `SimpleExample.ll`. Depending on the version of Q#, -======= -% cd SimpleExample/ -% dotnet clean SimpleExample.csproj -(...) -% dotnet build SimpleExample.csproj -c Debug -``` - -If everything went well, you should now have a subdirectory called `qir` and inside `qir`, you will find `SimpleExample.ll`. 
Depending on your compiler, ->>>>>>> features/llvm-passes -the generated QIR will vary, but in general, it will be relatively long. Looking at this file, you will see -that the total length is a little above 2000 lines of code. That is pretty extensive for a program which essentially -does nothing so obviously, most of the generated QIR must be dead code. We can now use `opt` to get rid of the dead code and we do this by invoking: - -```sh -opt -S qir/SimpleExample.ll -O3 > qir/SimpleExample-O3.ll -``` - -All going well, this should reduce your QIR to - -```language -; Function Attrs: norecurse nounwind readnone willreturn -define i64 @Example__QuantumFunction__Interop(i64 %nQubits) local_unnamed_addr #0 { -entry: - ret i64 0 -} - -define void @Example__QuantumFunction(i64 %nQubits) local_unnamed_addr #1 { -entry: - %0 = tail call %String* @__quantum__rt__int_to_string(i64 0) - tail call void @__quantum__rt__message(%String* %0) - tail call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} -``` - -<<<<<<< HEAD -plus a few extra delcarations. -======= -with a few additional declarations. 
->>>>>>> features/llvm-passes diff --git a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile.orig b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile.orig deleted file mode 100644 index 390ae68736..0000000000 --- a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile.orig +++ /dev/null @@ -1,17 +0,0 @@ -<<<<<<< HEAD -======= -all: qir/SimpleExample.ll - -qir/SimpleExample.ll: - dotnet build SimpleExample.csproj -c Debug - ->>>>>>> features/llvm-passes -clean: - rm -rf bin - rm -rf obj - rm -rf qir -<<<<<<< HEAD - -======= - ->>>>>>> features/llvm-passes diff --git a/src/Passes/include/Llvm.hpp b/src/Passes/include/Llvm.hpp index 1f54a2ae33..80a4728b83 100644 --- a/src/Passes/include/Llvm.hpp +++ b/src/Passes/include/Llvm.hpp @@ -33,7 +33,7 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" -q + // Building #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constants.h" diff --git a/src/Passes/include/Llvm.hpp.orig b/src/Passes/include/Llvm.hpp.orig deleted file mode 100644 index 54378660c7..0000000000 --- a/src/Passes/include/Llvm.hpp.orig +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#pragma GCC diagnostic ignored "-Wpedantic" -#pragma GCC diagnostic ignored "-Wunused-value" -#pragma GCC diagnostic ignored "-Wsign-compare" -#pragma GCC diagnostic ignored "-Wunknown-warning-option" -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Wall" -#pragma GCC diagnostic ignored "-Weverything" -#endif - -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wconversion" -#pragma clang diagnostic ignored "-Wpedantic" -#pragma clang diagnostic ignored "-Werror" -#pragma clang diagnostic ignored "-Wshadow" -#pragma clang diagnostic ignored "-Wreturn-std-move" -#pragma clang diagnostic ignored "-Wunknown-warning-option" -#pragma clang diagnostic ignored "-Wunused-parameter" -#pragma clang diagnostic ignored "-Wall" -#pragma clang diagnostic ignored "-Weverything" -#endif - -<<<<<<< HEAD -// Passes -#include "llvm/Passes/PassBuilder.h" -#include "llvm/Passes/PassPlugin.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Utils/Cloning.h" - -// Building -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/Constants.h" -#include "llvm/IR/DerivedTypes.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/IRBuilder.h" -#include "llvm/IR/LLVMContext.h" -#include "llvm/IR/LegacyPassManager.h" -#include "llvm/IR/Module.h" -#include "llvm/IR/Type.h" -#include "llvm/IR/Verifier.h" -======= -#include "llvm/IR/LegacyPassManager.h" -#include "llvm/Passes/PassBuilder.h" -#include "llvm/Passes/PassPlugin.h" -#include "llvm/Support/raw_ostream.h" ->>>>>>> features/llvm-passes - -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif diff --git a/src/Passes/libs/CMakeLists.txt.orig b/src/Passes/libs/CMakeLists.txt.orig deleted file mode 100644 index 0feef87119..0000000000 
--- a/src/Passes/libs/CMakeLists.txt.orig +++ /dev/null @@ -1,49 +0,0 @@ - -macro(list_qs_passes result) - file(GLOB children RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/*) - set(dirlist "") - foreach(child ${children}) - if(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${child}) - list(APPEND dirlist ${child}) - endif() - endforeach() - set(${result} ${dirlist}) -endmacro() - -list_qs_passes(ALL_PASSES) - -foreach(pass_plugin ${ALL_PASSES}) - - # Getting sources - file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/${pass_plugin}/*.cpp) - - # Adding library - add_library(${pass_plugin} - SHARED - ${sources}) - - # Adding include directories - target_include_directories( - ${pass_plugin} - PRIVATE - "${CMAKE_CURRENT_SOURCE_DIR}" - ) - - target_include_directories( - ${pass_plugin} - PRIVATE - "${CMAKE_CURRENT_SOURCE_DIR}/../include" - ) - - - # Linking - target_link_libraries(${pass_plugin} - "$<$:-undefined dynamic_lookup>") - -endforeach() -<<<<<<< HEAD - - -# add_library(passes SHARED ${ALL_PASSES}) -======= ->>>>>>> features/llvm-passes diff --git a/src/Passes/requirements.txt.orig b/src/Passes/requirements.txt.orig deleted file mode 100644 index f605709872..0000000000 --- a/src/Passes/requirements.txt.orig +++ /dev/null @@ -1,5 +0,0 @@ -click==8.0.1 -<<<<<<< HEAD -lit==12.0.1 -======= ->>>>>>> features/llvm-passes diff --git a/src/Passes/site-packages/TasksCI/builder.py.orig b/src/Passes/site-packages/TasksCI/builder.py.orig deleted file mode 100644 index 2ad3fc8aca..0000000000 --- a/src/Passes/site-packages/TasksCI/builder.py.orig +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import os -from . import settings -from . 
import toolchain -from .settings import PROJECT_ROOT -import logging -import subprocess -import sys -<<<<<<< HEAD -======= -from typing import Union - -OptionalInt = Union[int, None] -OptionalStr = Union[str, None] - ->>>>>>> features/llvm-passes - -logger = logging.getLogger() - - -<<<<<<< HEAD -def configure_cmake(build_dir: str, generator=None): -======= -def configure_cmake(build_dir: str, generator=None) -> None: ->>>>>>> features/llvm-passes - """ - Function that creates a build directory and runs - cmake to configure make, ninja or another generator. - """ - - logger.info("Source: {}".format(PROJECT_ROOT)) - logger.info("Build : {}".format(build_dir)) - - os.chdir(PROJECT_ROOT) - os.makedirs(build_dir, exist_ok=True) - - cmake_cmd = [toolchain.discover_cmake()] - - if generator is not None: - cmake_cmd += ['-G', generator] - - cmake_cmd += [PROJECT_ROOT] - - exit_code = subprocess.call(cmake_cmd, cwd=build_dir) - if exit_code != 0: - logger.error('Failed to configure project') - sys.exit(exit_code) - - -<<<<<<< HEAD -def build_project(build_dir: str, generator=None, concurrency=None): -======= -def build_project(build_dir: str, generator: OptionalStr = None, concurrency: OptionalInt = None) -> None: ->>>>>>> features/llvm-passes - """ - Given a build directory, this function builds all targets using - a specified generator and concurrency. - """ - - if generator in ["make", None]: - cmd = ["make"] - elif generator in ["ninja"]: - cmd = ["ninja"] - - if concurrency is None: -<<<<<<< HEAD - concurrency = settings.get_concurrency() -======= - concurrency = settings.get_degree_of_concurrency() ->>>>>>> features/llvm-passes - - cmd.append('-j{}'.format(concurrency)) - - exit_code = subprocess.call(cmd, cwd=build_dir) - - if exit_code != 0: - logger.error('Failed to make the project') - sys.exit(exit_code) - - -<<<<<<< HEAD -def run_tests(build_dir: str, concurrency=None): - """ - Runs the unit tests given a build directory. 
- """ - fail = False - - # Running lit tests - lit_cmd = ["lit", "tests/", "-v"] - exit_code = subprocess.call(lit_cmd, cwd=build_dir) - - if exit_code != 0: - logger.error('Lit test failed') - fail = True - - # Running CMake tests -======= -def run_tests(build_dir: str, concurrency: OptionalInt = None) -> None: - """ - Runs the unit tests given a build directory. - """ - ->>>>>>> features/llvm-passes - cmake_cmd = [toolchain.discover_ctest()] - - if concurrency is not None: - raise BaseException("No support for concurrent testing at the moment.") - - exit_code = subprocess.call(cmake_cmd, cwd=build_dir) - if exit_code != 0: -<<<<<<< HEAD - logger.error('CTest failed project') - fail = True - - if fail: - sys.exit(exit_code) - - -def main(build_dir: str, generator=None, test: bool = False): -======= - logger.error('Failed to configure project') - sys.exit(exit_code) - - -def main(build_dir: str, generator: OptionalStr = None, test: bool = False) -> None: ->>>>>>> features/llvm-passes - """ - Runs the entire build process by first configuring, the building - and optionally testing the codebase. - """ - - configure_cmake(build_dir, generator) - - build_project(build_dir, generator) - - if test: - run_tests(build_dir) diff --git a/src/Passes/site-packages/TasksCI/cli.py.orig b/src/Passes/site-packages/TasksCI/cli.py.orig deleted file mode 100644 index 82c051d2f7..0000000000 --- a/src/Passes/site-packages/TasksCI/cli.py.orig +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from .formatting import main as style_check_main -from .builder import main as builder_main -from .linting import main as lint_main, clang_tidy_diagnose - -import click -import logging -import sys -import os -import re -<<<<<<< HEAD -======= -from typing import Union - -OptionalInt = Union[int, None] -OptionalStr = Union[str, None] ->>>>>>> features/llvm-passes - -# Important directories -LIB_DIR = os.path.abspath(os.path.dirname((__file__))) -TEMPLATE_DIR = os.path.join(LIB_DIR, "templates") -SOURCE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(LIB_DIR))) - -# Logging configuration -logger = logging.getLogger() -ch = logging.StreamHandler() -ch.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -ch.setFormatter(formatter) -logger.addHandler(ch) - -# By default we only log errors -logger.setLevel(logging.ERROR) - - -@click.group() -@click.option('--loglevel', default="error") -<<<<<<< HEAD -def cli(loglevel): -======= -def cli(loglevel: str) -> None: ->>>>>>> features/llvm-passes - """ - Implements the general CLI options such as logging level. - """ - - # Valid values - levels = { - "critical": 50, - "error": 40, - "warning": 30, - "info": 20, - "debug": 10, - "notset": 0 - } - - # Getting the logging level and updating - loglevel = loglevel.lower() - if loglevel not in levels: - logger.critical("Invalid log level") - sys.exit(-1) - - logger.setLevel(levels[loglevel]) - logger.info("Loglevel set to {}".format(loglevel)) - - -@cli.command() -@click.option('--fix-issues', default=False, is_flag=True) -<<<<<<< HEAD -def stylecheck(fix_issues): -======= -def stylecheck(fix_issues: bool) -> None: ->>>>>>> features/llvm-passes - """ - Command for checking the style and optionally fixing issues. - Note that some issues are not automatically fixed. 
- """ - - logger.info("Invoking style checker") - style_check_main(fix_issues) - - -@cli.command() -@click.option("--diagnose", default=False, is_flag=True) -@click.option('--fix-issues', default=False, is_flag=True) -@click.option('--force', default=False, is_flag=True) -<<<<<<< HEAD -def lint(diagnose, fix_issues, force): -======= -def lint(diagnose: bool, fix_issues: bool, force: bool) -> None: ->>>>>>> features/llvm-passes - """ - Command for linting the code. - """ - - # Helpful option in order to diagnose Clang tidy. - if diagnose: - clang_tidy_diagnose() - - # In case we are diagnosing, no run is performed. - return - - # Allowing Clang tidy to attempt to fix issues. Generally, - # it is discouraged to use this features as it may result in - # a catastrophy - if fix_issues: - if not force: - print("""Fixing isssues using Clang Tidy will break your code. -Make sure that you have committed your changes BEFORE DOING THIS. -Even so, this feature is experimental and there have been reports of -clang-tidy modying system libraries - therefore, USE THIS FEATURE AT -YOUR OWN RISK. - -Write 'I understand' to proceed.""") - print(":") - x = input() - if x.lower() != "i understand": - print("Wrong answer - stopping!") - exit(-1) - - # Running the linter - logger.info("Invoking linter") - lint_main(fix_issues) - - -@cli.command() -@click.option('--debug/--no-debug', default=True) -@click.option('--generator', default=None) -<<<<<<< HEAD -def test(debug, generator): -======= -def test(debug: bool, generator: OptionalStr) -> None: ->>>>>>> features/llvm-passes - """ - Command to build and test the code base. - """ - - logger.info("Building and testing") - - build_dir = "Debug" - if not debug: - build_dir = "Release" - - builder_main(build_dir, generator, True) - - -@cli.command() -<<<<<<< HEAD -def runci(): -======= -def runci() -> None: ->>>>>>> features/llvm-passes - """ - Command to run all CI commands, starting with style check - then linting and finally unit tests. 
- """ - - build_dir = "Debug" - - style_check_main(False) - lint_main(False) - builder_main(build_dir, None, True) - - -@cli.command() -@click.argument( - "name" -) -@click.option( - "--template", - default=None, -) -<<<<<<< HEAD -def create_pass(name, template): -======= -def create_pass(name: str, template: OptionalStr) -> None: ->>>>>>> features/llvm-passes - """ - Helper command to create a new pass from a template. Templates - can be found in the template directory of the TasksCI tool. - """ - - # Checking whether the target already exists - target_dir = os.path.join(SOURCE_DIR, "libs", name) - if os.path.exists(target_dir): - logger.error("Pass '{}' already exists".format(name)) - exit(-1) - - # In case no template was specified, we list the option - # such that the user can choose one - if template is None: - - # Listing options - options = [] - print("Available templates:") - print("") - for template_name in os.listdir(TEMPLATE_DIR): - if os.path.isdir(os.path.join(TEMPLATE_DIR, template_name)): - options.append(template_name) - - # Printing option - pretty_template_name = re.sub(r'(? len(options) + 1: - try: - n = input("Select a template:") -======= - print("Type 'q' or 'quit' to abort.") - print("") - while n < 1 or n > len(options) + 1: - try: - n = input("Select a template:") - - if n == "q" or n == "quit": - logger.info("User aborted.") - exit(0) - ->>>>>>> features/llvm-passes - n = int(n) - except: # noqa: E722 - logger.error("Invalid choice") - exit(-1) - - # Getting the template - template = options[n - 1] - - # Checking that the template is valid. Note that even though - # we list the templates above, the user may have specified an - # invalid template via the command line. 
- template_dir = os.path.join(TEMPLATE_DIR, template) - if not os.path.exists(template_dir): - logger.error("Template does not exist") - exit(-1) - - # Creating an operation name by transforming the original name - # from "CamelCase" to "camel-case" - operation_name = re.sub(r'(?>>>>>> features/llvm-passes - -logger = logging.getLogger("FormatChecker") -CLANG_FORMAT_EXE = discover_formatter() - - -<<<<<<< HEAD -def require_token(token, filename, contents, cursor, fix_issues): -======= -def require_token(token: str, filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function to require that the next part of the document is a specific token. - """ - failed = False - if not contents[cursor:].startswith(token): - logger.error("{}: File must have {} at position {}".format(filename, token, cursor)) - failed = True - return cursor + len(token), failed - - -<<<<<<< HEAD -def require_pragma_once(filename, contents, cursor, fix_issues): -======= -def require_pragma_once(filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function that requires '#pragma once' in headers - """ - return require_token("#pragma once\n", filename, contents, cursor, fix_issues) - - -<<<<<<< HEAD -def enforce_cpp_license(filename, contents, cursor, fix_issues): -======= -def enforce_cpp_license(filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function that requires copyrights in C++ files - """ - return require_token("""// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -""", filename, contents, cursor, fix_issues) - - -<<<<<<< HEAD -def enforce_py_license(filename, contents, cursor, fix_issues): -======= -def enforce_py_license(filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function that requires copyrights in Python files - """ - # Allowing empty files - if contents.strip() == "": - return cursor, False - - return require_token("""# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -""", filename, contents, cursor, fix_issues) - - -<<<<<<< HEAD -def enforce_formatting(filename, contents, cursor, fix_issues): -======= -def enforce_formatting(filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function that tests whether the style of the C++ - source file follows that dictated by `.clang-format`. - """ - - # Opening a pipe with Clang format - p = subprocess.Popen( - [CLANG_FORMAT_EXE, '-style=file'], - stdout=subprocess.PIPE, - stdin=subprocess.PIPE, - cwd=PROJECT_ROOT) - - # Passing the contents of the file - output = p.communicate(input=contents.encode())[0] - - # In case something went wrong, we raise an exception - if p.returncode != 0: - raise Exception('Could not format contents') - - # Otherwise we check that the input is the same as the output - formatted = output.decode('utf-8') - if formatted != contents: - - # Updating the contents of the file if requested by the user - if fix_issues: - logger.info("Formatting {}".format(filename)) - with open(filename, "w") as filebuffer: - filebuffer.write(formatted) - return cursor, False - - logger.error("{} was not correctly formatted.".format(filename)) - return cursor, True - - return cursor, False - - -# Source pipeline definitions. These instructs the next part of -# the code on how to validate each source file. 
-<<<<<<< HEAD -======= - ->>>>>>> features/llvm-passes -SOURCE_PIPELINES = [ - { - "name": "C++ Main", - "src": path.join(PROJECT_ROOT, "libs"), - - "pipelines": { - "hpp": [ - require_pragma_once, - enforce_cpp_license, - enforce_formatting - ], - "cpp": [ - enforce_cpp_license, - enforce_formatting - ] - } - }, - { - "name": "Scripts", - "src": path.join(PROJECT_ROOT, "site-packages"), - - "pipelines": { - "py": [ - enforce_py_license, - ], - } - } -] - - -<<<<<<< HEAD -def execute_pipeline(pipeline, filename: str, fix_issues: bool): -======= -def execute_pipeline(pipeline: IPipeline, filename: str, fix_issues: bool) -> bool: ->>>>>>> features/llvm-passes - """ - Helper function to execute a pipeline for a specific file - """ - logger.info("Executing pipeline for {}".format(filename)) - cursor = 0 - - # Reading the file - with open(filename, "r") as fb: - contents = fb.read() - - # Executing each step of the pipeline - failed = False - for fnc in pipeline: - cursor, f = fnc(filename, contents, cursor, fix_issues) - failed = failed or f - - return failed - - -<<<<<<< HEAD -def main(fix_issues: bool = False): -======= -def main(fix_issues: bool = False) -> None: ->>>>>>> features/llvm-passes - """ - This function runs a pipeline for every file that - matches the description given in SOURCE_PIPELINES. - """ - failed = False - - # Iterating through every definition - for language in SOURCE_PIPELINES: - - logger.info("Formatting {}".format(language["name"])) - basedir = language["src"] - pipelines = language["pipelines"] - - # Finding all files whose location matches that of the - # definition - for root, dirs, files in os.walk(basedir): - - for filename in files: - if "." 
not in filename: - continue - - # Executing the pipeline if appropriate - _, ext = filename.rsplit(".", 1) - if ext in pipelines: - f = execute_pipeline(pipelines[ext], path.join(root, filename), fix_issues) - failed = failed or f - - if failed: - logger.error("Your code did not pass formatting.") - sys.exit(-1) - - -if __name__ == "__main__": - main() diff --git a/src/Passes/site-packages/TasksCI/linting.py.orig b/src/Passes/site-packages/TasksCI/linting.py.orig deleted file mode 100644 index 7145fc7b0f..0000000000 --- a/src/Passes/site-packages/TasksCI/linting.py.orig +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import logging -from .builder import configure_cmake, build_project -from . import toolchain -from .settings import PROJECT_ROOT -import os -import subprocess -import sys -<<<<<<< HEAD -======= -from typing import Union - -OptionalInt = Union[int, None] -OptionalStr = Union[str, None] ->>>>>>> features/llvm-passes - -logger = logging.getLogger("Linter") - - -<<<<<<< HEAD -def clang_tidy_diagnose(): -======= -def clang_tidy_diagnose() -> None: ->>>>>>> features/llvm-passes - """ - Helper function to print the configuration of Clang tidy - """ - - # Getting the config - config = subprocess.check_output( - [toolchain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() - - # Getting the list of checks - check_list = subprocess.check_output( - [toolchain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() - - # Printing it all to the user - checks = [x.strip() for x in check_list.split("\n") if '-' in x] - - print("Working directory: {}".format(PROJECT_ROOT)) - print("") - print(config) - print("") - print("Clang tidy checks:") - - for check in sorted(checks): - print(" -", check) - - -<<<<<<< HEAD -def run_clang_tidy(build_dir, filename, fix_issues: bool = False): -======= -def run_clang_tidy(build_dir: str, filename: str, fix_issues: bool = False) -> bool: ->>>>>>> 
features/llvm-passes - """ - Function that runs Clang tidy for a single file given a build directory - and a filename. - """ - - # Configuring the command line arguments - clang_tidy_binary = toolchain.discover_tidy() - - cmd = [clang_tidy_binary] - output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) - - cmd.append('-header-filter=".*\\/(Passes)\\/(libs)\\/.*"') - cmd.append('-p=' + build_dir) - cmd.append('-export-fixes={}'.format(output_file)) - cmd.append('--use-color') - - if fix_issues: - cmd.append("-fix") - - cmd.append(filename) - - logger.info("Running '{}'".format(" ".join(cmd))) - - # Getting the output - p = subprocess.Popen( - " ".join(cmd), - stdout=subprocess.PIPE, - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - cwd=PROJECT_ROOT, - shell=True) - - output, err = p.communicate() - - output = output.decode() - err = err.decode() - - if p.returncode != 0: - - # The return value is negative even if the user code is without - # errors, so we check whether there are any errors specified in - # error output - if "error" in err: - sys.stderr.write(output) - sys.stderr.write(err) - - logger.error("{} failed static analysis".format(filename)) - return False - - logger.info("All good!") - return True - - -<<<<<<< HEAD -def main_cpp(fix_issues: bool): -======= -def main_cpp(fix_issues: bool) -> bool: ->>>>>>> features/llvm-passes - """ - Main function for C++ linting. This function builds and lints - the code. - """ - - logger.info("Linting") - build_dir = os.path.join(PROJECT_ROOT, "Debug") - source_dir = os.path.join(PROJECT_ROOT, "libs") - generator = None - extensions = ["cpp"] - - # Configuring CMake - configure_cmake(build_dir, generator) - - # Building - build_project(build_dir, generator) - - # Generating list of files - # TODO(TFR): Ensure that it is only those which were changed that are - # analysed - files_to_analyse = [] - - for root, dirs, files in os.walk(source_dir): - for filename in files: - if "." 
not in filename: - continue - - _, ext = filename.rsplit(".", 1) - if ext in extensions: - files_to_analyse.append(os.path.join(root, filename)) - - success = True - for filename in files_to_analyse: - success = success and run_clang_tidy(build_dir, filename, fix_issues=fix_issues) - return success - - -<<<<<<< HEAD -def main(fix_issues: bool): -======= -def main(fix_issues: bool) -> None: ->>>>>>> features/llvm-passes - if not main_cpp(fix_issues): - sys.exit(-1) diff --git a/src/Passes/site-packages/TasksCI/settings.py.orig b/src/Passes/site-packages/TasksCI/settings.py.orig deleted file mode 100644 index 62f2f39331..0000000000 --- a/src/Passes/site-packages/TasksCI/settings.py.orig +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from os import path -import multiprocessing - -PROJECT_ROOT = path.abspath(path.dirname(path.dirname(path.dirname(__file__)))) - -MAX_CONCURRENCY = 7 - - -<<<<<<< HEAD -def get_concurrency(): -======= -def get_degree_of_concurrency() -> int: ->>>>>>> features/llvm-passes - """ - Function that gives a default concurrency for the compilation - and testing process. - """ - return min(MAX_CONCURRENCY, multiprocessing.cpu_count()) diff --git a/src/Passes/site-packages/TasksCI/toolchain.py.orig b/src/Passes/site-packages/TasksCI/toolchain.py.orig deleted file mode 100644 index ddd96c6e27..0000000000 --- a/src/Passes/site-packages/TasksCI/toolchain.py.orig +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -import shutil - - -<<<<<<< HEAD -def discover_formatter(): -======= -def discover_formatter() -> str: ->>>>>>> features/llvm-passes - """ - Finds the clang-format executable - """ - return shutil.which("clang-format") - - -<<<<<<< HEAD -def discover_tidy(): -======= -def discover_tidy() -> str: ->>>>>>> features/llvm-passes - """ - Finds the clang-tidy executable - """ - return shutil.which("clang-tidy") - - -<<<<<<< HEAD -def discover_cmake(): -======= -def discover_cmake() -> str: ->>>>>>> features/llvm-passes - """ - Finds the cmake executable - """ - return shutil.which("cmake") - - -<<<<<<< HEAD -def discover_ctest(): -======= -def discover_ctest() -> str: ->>>>>>> features/llvm-passes - """ - Finds the ctest executable - """ - return shutil.which("ctest") From c64bf691410cc56c8c8ebb395b2a959cb39f601e Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Fri, 30 Jul 2021 07:49:31 +0200 Subject: [PATCH 048/106] Removing garbage --- .../examples/QubitAllocationAnalysis/test.txt | 199 ------------------ 1 file changed, 199 deletions(-) delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/test.txt diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.txt b/src/Passes/examples/QubitAllocationAnalysis/test.txt deleted file mode 100644 index 133f3304be..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/test.txt +++ /dev/null @@ -1,199 +0,0 @@ -pushd ../../ && mkdir -p Debug && cd Debug && cmake ..&& popd || popd -~/Documents/Projects/qsharp-compiler/src/Passes ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis --- Found LLVM 12.0.1 --- Using LLVMConfig.cmake in: /usr/local/opt/llvm/lib/cmake/llvm --- Configuring done --- Generating done --- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/Passes/Debug -~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -pushd ../../Debug && make QubitAllocationAnalysis && popd || popd 
-~/Documents/Projects/qsharp-compiler/src/Passes/Debug ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -Consolidate compiler generated dependencies of target QubitAllocationAnalysis -[100%] Built target QubitAllocationAnalysis -~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -pushd ../../Debug && make ExpandStaticAllocation && popd || popd -~/Documents/Projects/qsharp-compiler/src/Passes/Debug ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -Consolidate compiler generated dependencies of target ExpandStaticAllocation -[100%] Built target ExpandStaticAllocation -~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib \ - -load-pass-plugin ../../Debug/libs/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll -; ModuleID = 'analysis-example.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumProgram__body(i64 3, i64 2, i64 1) - call fastcc void @Example__QuantumProgram__body(i64 4, i64 9, i64 4) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body(i64 %x, i64 %h, i64 %g) unnamed_addr { -entry: - %.neg = xor i64 %x, -1 - %.neg1 = mul i64 %.neg, %x - %z.neg = add i64 %.neg1, 47 - %y = mul i64 %x, 3 - %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) - %0 = add i64 %y, -2 - %1 = lshr i64 %0, 1 - %2 = add i64 %z.neg, %1 - %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) - %3 = sub i64 %y, %g - %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) - call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) - %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %h) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) - %4 = call fastcc i64 @Example__X__body(i64 %x) - %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) - br label %header__1 - -header__1: ; preds = %header__1, %entry - %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] - %.not = icmp sgt i64 %idxIteration, %g - %5 = add i64 %idxIteration, 1 - br i1 %.not, label %exit__1, label %header__1 - -exit__1: ; preds = %header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits0) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -; Function Attrs: norecurse nounwind readnone willreturn -define internal fastcc i64 @Example__X__body(i64 %value) unnamed_addr #0 { -entry: - %0 = mul i64 %value, 3 - ret i64 %0 -} - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 
@Example__Main__Interop() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #2 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -define internal fastcc void @Example__QuantumProgram__body.1() unnamed_addr { -entry: - %.neg = xor i64 3, -1 - %.neg1 = mul i64 %.neg, 3 - %z.neg = add i64 %.neg1, 47 - %y = mul i64 3, 3 - %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) - %0 = add i64 %y, -2 - %1 = lshr i64 %0, 1 - %2 = add i64 %z.neg, %1 - %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) - %3 = sub i64 %y, 1 - %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) - %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) - %4 = call fastcc i64 @Example__X__body(i64 3) - %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) - br label %header__1 - -header__1: ; preds = %header__1, %entry - %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] - %.not = icmp sgt i64 %idxIteration, 1 - %5 = add i64 %idxIteration, 1 - br i1 %.not, label %exit__1, label %header__1 - -exit__1: ; preds = %header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) - 
call void @__quantum__rt__qubit_release_array(%Array* %qubits4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits0) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body.2() unnamed_addr { -entry: - %.neg = xor i64 4, -1 - %.neg1 = mul i64 %.neg, 4 - %z.neg = add i64 %.neg1, 47 - %y = mul i64 4, 3 - %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) - %0 = add i64 %y, -2 - %1 = lshr i64 %0, 1 - %2 = add i64 %z.neg, %1 - %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) - %3 = sub i64 %y, 4 - %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) - %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) - %4 = call fastcc i64 @Example__X__body(i64 4) - %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) - br label %header__1 - -header__1: ; preds = %header__1, %entry - %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] - %.not = icmp sgt i64 %idxIteration, 4 - %5 = add i64 %idxIteration, 1 - br i1 %.not, label %exit__1, label %header__1 - -exit__1: ; preds = 
%header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits0) - ret void -} - -attributes #0 = { norecurse nounwind readnone willreturn } -attributes #1 = { "InteropFriendly" } -attributes #2 = { "EntryPoint" } From 15ff3795722ec426ecd15a7a7c760a7f3fe937d3 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 3 Aug 2021 12:42:36 +0200 Subject: [PATCH 049/106] WiP replacement pass --- .../examples/ClassicalIrCommandline/Makefile | 5 +- .../examples/ClassicalIrCommandline/README.md | 12 +- .../classical-program.c | 2 +- .../classical-program.ll | 165 ++--- .../ConstSizeArray/ConstSizeArray.qs | 4 - .../examples/QubitAllocationAnalysis/Makefile | 6 + .../analysis-example.ll | 455 ++------------ .../InstructionReplacement.cpp | 56 ++ .../InstructionReplacement.hpp | 53 ++ .../LibInstructionReplacement.cpp | 38 ++ .../libs/InstructionReplacement/Pattern.cpp | 36 ++ .../libs/InstructionReplacement/Pattern.hpp | 117 ++++ .../InstructionReplacement/SPECIFICATION.md | 1 + src/Passes/libs/OpsCounter/OpsCounter.cpp | 122 ++-- .../QubitAllocationAnalysis.cpp | 563 +++++++++--------- 15 files changed, 757 insertions(+), 878 deletions(-) create mode 100644 src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp create mode 100644 src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp 
create mode 100644 src/Passes/libs/InstructionReplacement/LibInstructionReplacement.cpp create mode 100644 src/Passes/libs/InstructionReplacement/Pattern.cpp create mode 100644 src/Passes/libs/InstructionReplacement/Pattern.hpp create mode 100644 src/Passes/libs/InstructionReplacement/SPECIFICATION.md diff --git a/src/Passes/examples/ClassicalIrCommandline/Makefile b/src/Passes/examples/ClassicalIrCommandline/Makefile index 1dd41d69e7..2deedec1bc 100644 --- a/src/Passes/examples/ClassicalIrCommandline/Makefile +++ b/src/Passes/examples/ClassicalIrCommandline/Makefile @@ -1,13 +1,12 @@ emit-llvm: - clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll + clang -O0 -fno-inline -S -emit-llvm classical-program.c -o classical-program.ll emit-llvm-bc: clang -O0 -c -emit-llvm classical-program.c -o classical-program.bc debug-ng-pass-mac: emit-llvm-bc - opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -debug --passes="operation-counter" -disable-output classical-program.bc - + opt -load-pass-plugin ../../Debug/libOpsCounter.dylib --passes="print" -disable-output classical-program.bc clean: diff --git a/src/Passes/examples/ClassicalIrCommandline/README.md b/src/Passes/examples/ClassicalIrCommandline/README.md index 0b0d3185ac..67a9133371 100644 --- a/src/Passes/examples/ClassicalIrCommandline/README.md +++ b/src/Passes/examples/ClassicalIrCommandline/README.md @@ -19,18 +19,12 @@ whereas the latter is generated by executing: This generates a nice and short IR which makes not too overwhelming to understand what is going on. -## Legacy passes - -This part assumes that you have built the Passes library. - -```sh -opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-counter -analyze classical-program.ll -``` - ## Next-gen passes This part assumes that you have built the Passes library. 
```sh -opt -load-pass-plugin ../../{Debug,Release}/libs/libQSharpPasses.{dylib,so} --passes="print" -disable-output classical-program.bc +opt -load-pass-plugin ../../{Debug,Release}/libs/libOpsCounter.{dylib,so} --passes="print" -disable-output classical-program.bc ``` + +opt -O3 -S classical-program.ll diff --git a/src/Passes/examples/ClassicalIrCommandline/classical-program.c b/src/Passes/examples/ClassicalIrCommandline/classical-program.c index ae56c14d68..7de7c1287c 100644 --- a/src/Passes/examples/ClassicalIrCommandline/classical-program.c +++ b/src/Passes/examples/ClassicalIrCommandline/classical-program.c @@ -3,7 +3,7 @@ int foo(int x) return x; } -void bar(int x, int y) +inline void bar(int x, int y) { foo(x + y); } diff --git a/src/Passes/examples/ClassicalIrCommandline/classical-program.ll b/src/Passes/examples/ClassicalIrCommandline/classical-program.ll index 5ad71d9d0b..d7464c030d 100644 --- a/src/Passes/examples/ClassicalIrCommandline/classical-program.ll +++ b/src/Passes/examples/ClassicalIrCommandline/classical-program.ll @@ -1,123 +1,58 @@ -; ModuleID = 'classical-program.cpp' -source_filename = "classical-program.cpp" +; ModuleID = 'classical-program.c' +source_filename = "classical-program.c" target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx11.0.0" -%"class.std::__1::basic_ostream" = type { i32 (...)**, %"class.std::__1::basic_ios.base" } -%"class.std::__1::basic_ios.base" = type <{ %"class.std::__1::ios_base", %"class.std::__1::basic_ostream"*, i32 }> -%"class.std::__1::ios_base" = type { i32 (...)**, i32, i64, i64, i32, i32, i8*, i8*, void (i32, %"class.std::__1::ios_base"*, i32)**, i32*, i64, i64, i64*, i64, i64, i8**, i64, i64 } -%"class.std::__1::locale::id" = type <{ %"struct.std::__1::once_flag", i32, [4 x i8] }> -%"struct.std::__1::once_flag" = type { i64 } -%"class.std::__1::locale" = type { %"class.std::__1::locale::__imp"* } -%"class.std::__1::locale::__imp" 
= type opaque -%"class.std::__1::locale::facet" = type { %"class.std::__1::__shared_count" } -%"class.std::__1::__shared_count" = type { i32 (...)**, i64 } -%"class.std::__1::ctype" = type <{ %"class.std::__1::locale::facet", i32*, i8, [7 x i8] }> - -@_ZNSt3__14coutE = external global %"class.std::__1::basic_ostream", align 8 -@_ZNSt3__15ctypeIcE2idE = external global %"class.std::__1::locale::id", align 8 - -; Function Attrs: norecurse ssp uwtable mustprogress -define dso_local i32 @main() local_unnamed_addr #0 personality i32 (...)* @__gxx_personality_v0 { - %1 = alloca %"class.std::__1::locale", align 8 - %2 = tail call i32 @_Z9fibonaccii(i32 3) - %3 = tail call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEi(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) @_ZNSt3__14coutE, i32 %2) - %4 = bitcast %"class.std::__1::basic_ostream"* %3 to i8** - %5 = load i8*, i8** %4, align 8, !tbaa !3 - %6 = getelementptr i8, i8* %5, i64 -24 - %7 = bitcast i8* %6 to i64* - %8 = load i64, i64* %7, align 8 - %9 = bitcast %"class.std::__1::basic_ostream"* %3 to i8* - %10 = getelementptr inbounds i8, i8* %9, i64 %8 - %11 = bitcast %"class.std::__1::locale"* %1 to i8* - call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %11) #5 - %12 = bitcast i8* %10 to %"class.std::__1::ios_base"* - call void @_ZNKSt3__18ios_base6getlocEv(%"class.std::__1::locale"* nonnull sret(%"class.std::__1::locale") align 8 %1, %"class.std::__1::ios_base"* nonnull dereferenceable(136) %12) - %13 = invoke %"class.std::__1::locale::facet"* @_ZNKSt3__16locale9use_facetERNS0_2idE(%"class.std::__1::locale"* nonnull dereferenceable(8) %1, %"class.std::__1::locale::id"* nonnull align 8 dereferenceable(12) @_ZNSt3__15ctypeIcE2idE) - to label %14 unwind label %21 - -14: ; preds = %0 - %15 = bitcast %"class.std::__1::locale::facet"* %13 to %"class.std::__1::ctype"* - %16 = bitcast %"class.std::__1::locale::facet"* %13 to i8 
(%"class.std::__1::ctype"*, i8)*** - %17 = load i8 (%"class.std::__1::ctype"*, i8)**, i8 (%"class.std::__1::ctype"*, i8)*** %16, align 8, !tbaa !3 - %18 = getelementptr inbounds i8 (%"class.std::__1::ctype"*, i8)*, i8 (%"class.std::__1::ctype"*, i8)** %17, i64 7 - %19 = load i8 (%"class.std::__1::ctype"*, i8)*, i8 (%"class.std::__1::ctype"*, i8)** %18, align 8 - %20 = invoke signext i8 %19(%"class.std::__1::ctype"* nonnull dereferenceable(25) %15, i8 signext 10) - to label %23 unwind label %21 - -21: ; preds = %14, %0 - %22 = landingpad { i8*, i32 } - cleanup - call void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8) %1) #5 - call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %11) #5 - resume { i8*, i32 } %22 - -23: ; preds = %14 - call void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8) %1) #5 - call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %11) #5 - %24 = call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE3putEc(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) %3, i8 signext %20) - %25 = call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5flushEv(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) %3) - ret i32 0 +; Function Attrs: noinline nounwind optnone ssp uwtable +define i32 @foo(i32 %0) #0 !dbg !8 { + %2 = alloca i32, align 4 + store i32 %0, i32* %2, align 4 + call void @llvm.dbg.declare(metadata i32* %2, metadata !12, metadata !DIExpression()), !dbg !13 + %3 = load i32, i32* %2, align 4, !dbg !14 + ret i32 %3, !dbg !15 } -declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEi(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8), i32) local_unnamed_addr #1 +; Function Attrs: nounwind readnone speculatable willreturn +declare void 
@llvm.dbg.declare(metadata, metadata, metadata) #1 -; Function Attrs: ssp uwtable mustprogress -define linkonce_odr i32 @_Z9fibonaccii(i32 %0) local_unnamed_addr #2 { - %2 = icmp slt i32 %0, 2 - br i1 %2, label %13, label %3 - -3: ; preds = %1, %3 - %4 = phi i32 [ %8, %3 ], [ %0, %1 ] - %5 = phi i32 [ %9, %3 ], [ 0, %1 ] - %6 = add nsw i32 %4, -1 - %7 = tail call i32 @_Z9fibonaccii(i32 %6) - %8 = add nsw i32 %4, -2 - %9 = add nsw i32 %7, %5 - %10 = icmp slt i32 %4, 4 - br i1 %10, label %11, label %3 - -11: ; preds = %3 - %12 = add i32 %9, 1 - br label %13 - -13: ; preds = %11, %1 - %14 = phi i32 [ 1, %1 ], [ %12, %11 ] - ret i32 %14 +; Function Attrs: noinline nounwind optnone ssp uwtable +define i32 @main() #0 !dbg !16 { + %1 = alloca i32, align 4 + store i32 0, i32* %1, align 4 + %2 = call i32 @foo(i32 2), !dbg !19 + call void @bar(i32 3, i32 2), !dbg !20 + ret i32 0, !dbg !21 } -declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE3putEc(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8), i8 signext) local_unnamed_addr #1 - -declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5flushEv(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8)) local_unnamed_addr #1 - -; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #3 - -declare void @_ZNKSt3__18ios_base6getlocEv(%"class.std::__1::locale"* sret(%"class.std::__1::locale") align 8, %"class.std::__1::ios_base"* nonnull dereferenceable(136)) local_unnamed_addr #1 - -declare i32 @__gxx_personality_v0(...) 
- -; Function Attrs: nounwind -declare void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8)) unnamed_addr #4 - -; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #3 - -declare %"class.std::__1::locale::facet"* @_ZNKSt3__16locale9use_facetERNS0_2idE(%"class.std::__1::locale"* nonnull dereferenceable(8), %"class.std::__1::locale::id"* nonnull align 8 dereferenceable(12)) local_unnamed_addr #1 - -attributes #0 = { norecurse ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #2 = { ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #3 = { argmemonly nofree nosync nounwind 
willreturn } -attributes #4 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #5 = { nounwind } - -!llvm.module.flags = !{!0, !1} -!llvm.ident = !{!2} - -!0 = !{i32 1, !"wchar_size", i32 4} -!1 = !{i32 7, !"PIC Level", i32 2} -!2 = !{!"Homebrew clang version 12.0.1"} -!3 = !{!4, !4, i64 0} -!4 = !{!"vtable pointer", !5, i64 0} -!5 = !{!"Simple C++ TBAA"} +declare void @bar(i32, i32) #2 + +attributes #0 = { noinline nounwind optnone ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readnone speculatable willreturn } +attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4, !5, !6} +!llvm.ident = !{!7} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang 
version 11.1.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None, sysroot: "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk", sdk: "MacOSX.sdk") +!1 = !DIFile(filename: "classical-program.c", directory: "/Users/tfr/Documents/Projects/qsharp-compiler/src/Passes/examples/ClassicalIrCommandline") +!2 = !{} +!3 = !{i32 7, !"Dwarf Version", i32 4} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!5 = !{i32 1, !"wchar_size", i32 4} +!6 = !{i32 7, !"PIC Level", i32 2} +!7 = !{!"clang version 11.1.0"} +!8 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !9, scopeLine: 2, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2) +!9 = !DISubroutineType(types: !10) +!10 = !{!11, !11} +!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!12 = !DILocalVariable(name: "x", arg: 1, scope: !8, file: !1, line: 1, type: !11) +!13 = !DILocation(line: 1, column: 13, scope: !8) +!14 = !DILocation(line: 3, column: 10, scope: !8) +!15 = !DILocation(line: 3, column: 3, scope: !8) +!16 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 11, type: !17, scopeLine: 12, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2) +!17 = !DISubroutineType(types: !18) +!18 = !{!11} +!19 = !DILocation(line: 13, column: 3, scope: !16) +!20 = !DILocation(line: 14, column: 3, scope: !16) +!21 = !DILocation(line: 16, column: 3, scope: !16) diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index 0b5655ddec..99fe9b8162 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -2,7 +2,6 @@ namespace Example { @EntryPoint() operation Main() : Int { - QuantumProgram(3,2,1); QuantumProgram(4,X(2),4); return 0; @@ -23,8 +22,5 @@ namespace Example { use 
qubits3 = Qubit[h]; use qubits4 = Qubit[X(x)]; - for idxIteration in 0..g { - //Message(idxIteration); - } } } \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index feedf8753f..d739337e0a 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -6,6 +6,9 @@ run-expand: build-qaa build-esa analysis-example.ll run: build-qaa analysis-example.ll opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +run-replace: build-ir analysis-example.ll + opt -load-pass-plugin ../../Debug/libs/libInstructionReplacement.dylib --passes="instruction-replacement" -disable-output analysis-example.ll + build-prepare: pushd ../../ && mkdir -p Debug && cd Debug && cmake ..&& popd || popd @@ -16,6 +19,9 @@ build-qaa: build-prepare build-esa: build-prepare pushd ../../Debug && make ExpandStaticAllocation && popd || popd +build-ir: build-prepare + pushd ../../Debug && make InstructionReplacement && popd || popd + analysis-example.ll: cd ConstSizeArray && make analysis-example.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index 6f6c98c8e0..a76121d5cb 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -1,438 +1,85 @@ ; ModuleID = 'qir/ConstSizeArray.ll' source_filename = "qir/ConstSizeArray.ll" -%Tuple = type opaque -%Qubit = type opaque %Array = type opaque -%Result = type opaque -%Callable = type opaque %String = type opaque -@Microsoft__Quantum__Qir__Emission__M = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Qir__Emission__M__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void 
(%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] -@0 = internal constant [3 x i8] c", \00" -@1 = internal constant [2 x i8] c"[\00" -@2 = internal constant [2 x i8] c"]\00" - -declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) local_unnamed_addr - -declare void @__quantum__qis__cnot__adj(%Qubit*, %Qubit*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare %Tuple* @__quantum__rt__tuple_create(i64) local_unnamed_addr - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) local_unnamed_addr - -define internal fastcc %Result* @Microsoft__Quantum__Qir__Emission__M__body(%Qubit* %q) unnamed_addr { +define internal fastcc void @Example__Main__body() unnamed_addr { entry: - %0 = call %Result* @__quantum__qis__m__body(%Qubit* %q) - ret %Result* %0 -} - -declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr - -define internal fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %a, %Qubit* %b, %Qubit* %c) unnamed_addr { -entry: - call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %b) - call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %a) - call void @__quantum__qis__toffoli__body(%Qubit* %a, %Qubit* %b, %Qubit* %c) + call fastcc void @Example__QuantumProgram__body(i64 3, i64 2, i64 1) + %0 = call fastcc i64 @Example__X__body(i64 2) + call fastcc void @Example__QuantumProgram__body(i64 4, i64 %0, i64 4) ret void } -declare void @__quantum__qis__toffoli__body(%Qubit*, %Qubit*, %Qubit*) local_unnamed_addr - -define internal fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %a, %Qubit* %b, %Qubit* %c) unnamed_addr { +define internal fastcc void @Example__QuantumProgram__body(i64 %x, i64 %h, i64 %g) unnamed_addr { entry: - call void @__quantum__qis__toffoli__adj(%Qubit* %a, %Qubit* %b, %Qubit* %c) - call void @__quantum__qis__cnot__adj(%Qubit* %c, %Qubit* %a) - call void 
@__quantum__qis__cnot__adj(%Qubit* %c, %Qubit* %b) + %.neg = xor i64 %x, -1 + %.neg1 = mul i64 %.neg, %x + %z.neg = add i64 %.neg1, 47 + %y = mul i64 %x, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, %g + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %h) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) + %4 = call fastcc i64 @Example__X__body(i64 %x) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) ret void } -declare void @__quantum__qis__toffoli__adj(%Qubit*, %Qubit*, %Qubit*) local_unnamed_addr - -define internal fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() unnamed_addr { +; Function Attrs: norecurse 
nounwind readnone +define internal fastcc i64 @Example__X__body(i64 %value) unnamed_addr #0 { entry: - %a = call %Array* @__quantum__rt__qubit_allocate_array(i64 4) - call void @__quantum__rt__array_update_alias_count(%Array* %a, i32 1) - %b = call %Array* @__quantum__rt__qubit_allocate_array(i64 4) - call void @__quantum__rt__array_update_alias_count(%Array* %b, i32 1) - %cin = call %Qubit* @__quantum__rt__qubit_allocate() - %cout = call %Qubit* @__quantum__rt__qubit_allocate() - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %q = load %Qubit*, %Qubit** %1, align 8 - call void @__quantum__qis__x__body(%Qubit* %q) - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %b) - %3 = add i64 %2, -1 - %.not1 = icmp slt i64 %3, 0 - br i1 %.not1, label %exit__1, label %body__1 - -body__1: ; preds = %entry, %body__1 - %4 = phi i64 [ %7, %body__1 ], [ 0, %entry ] - %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 %4) - %6 = bitcast i8* %5 to %Qubit** - %q__1 = load %Qubit*, %Qubit** %6, align 8 - call void @__quantum__qis__x__body(%Qubit* %q__1) - %7 = add i64 %4, 1 - %.not = icmp sgt i64 %7, %3 - br i1 %.not, label %exit__1, label %body__1 - -exit__1: ; preds = %body__1, %entry - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9, align 8 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %cin, %Qubit* %10, %Qubit* %13) - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %15 = bitcast i8* %14 to %Qubit** - %16 = load %Qubit*, %Qubit** %15, align 8 - %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 1) - %18 = bitcast i8* %17 to %Qubit** - %19 = load %Qubit*, %Qubit** 
%18, align 8 - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) - %21 = bitcast i8* %20 to %Qubit** - %22 = load %Qubit*, %Qubit** %21, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %16, %Qubit* %19, %Qubit* %22) - %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) - %24 = bitcast i8* %23 to %Qubit** - %25 = load %Qubit*, %Qubit** %24, align 8 - %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 2) - %27 = bitcast i8* %26 to %Qubit** - %28 = load %Qubit*, %Qubit** %27, align 8 - %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) - %30 = bitcast i8* %29 to %Qubit** - %31 = load %Qubit*, %Qubit** %30, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %25, %Qubit* %28, %Qubit* %31) - %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) - %33 = bitcast i8* %32 to %Qubit** - %34 = load %Qubit*, %Qubit** %33, align 8 - %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 3) - %36 = bitcast i8* %35 to %Qubit** - %37 = load %Qubit*, %Qubit** %36, align 8 - %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) - %39 = bitcast i8* %38 to %Qubit** - %40 = load %Qubit*, %Qubit** %39, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %34, %Qubit* %37, %Qubit* %40) - %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) - %42 = bitcast i8* %41 to %Qubit** - %c = load %Qubit*, %Qubit** %42, align 8 - call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %cout) - %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) - %44 = bitcast i8* %43 to %Qubit** - %45 = load %Qubit*, %Qubit** %44, align 8 - %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 3) - %47 = bitcast i8* %46 to %Qubit** - %48 = load %Qubit*, %Qubit** %47, align 8 - %49 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) - %50 = bitcast i8* %49 to %Qubit** - %51 = load %Qubit*, %Qubit** %50, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %45, %Qubit* %48, %Qubit* %51) - %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) - %53 = bitcast i8* %52 to %Qubit** - %54 = load %Qubit*, %Qubit** %53, align 8 - %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 2) - %56 = bitcast i8* %55 to %Qubit** - %57 = load %Qubit*, %Qubit** %56, align 8 - %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) - %59 = bitcast i8* %58 to %Qubit** - %60 = load %Qubit*, %Qubit** %59, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %54, %Qubit* %57, %Qubit* %60) - %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %62 = bitcast i8* %61 to %Qubit** - %63 = load %Qubit*, %Qubit** %62, align 8 - %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 1) - %65 = bitcast i8* %64 to %Qubit** - %66 = load %Qubit*, %Qubit** %65, align 8 - %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) - %68 = bitcast i8* %67 to %Qubit** - %69 = load %Qubit*, %Qubit** %68, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %63, %Qubit* %66, %Qubit* %69) - %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 0) - %71 = bitcast i8* %70 to %Qubit** - %72 = load %Qubit*, %Qubit** %71, align 8 - %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %74 = bitcast i8* %73 to %Qubit** - %75 = load %Qubit*, %Qubit** %74, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %cin, %Qubit* %72, %Qubit* %75) - %76 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* nonnull @Microsoft__Quantum__Qir__Emission__M, [2 x void (%Tuple*, 
i32)*]* null, %Tuple* null) - %77 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission___73da7dcac81a47ddabb1a0e30be3dfdb_ForEach__body(%Callable* %76, %Array* %b) - call void @__quantum__rt__array_update_alias_count(%Array* %b, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %a, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %76, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %76, i32 -1) - call void @__quantum__rt__qubit_release(%Qubit* %cin) - call void @__quantum__rt__qubit_release(%Qubit* %cout) - call void @__quantum__rt__qubit_release_array(%Array* %b) - call void @__quantum__rt__qubit_release_array(%Array* %a) - ret %Array* %77 + %0 = mul i64 %value, 3 + ret i64 %0 } -declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr - declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr -declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr - -declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr - -declare i64 @__quantum__rt__array_get_size_1d(%Array*) local_unnamed_addr - -define internal fastcc %Array* @Microsoft__Quantum__Qir__Emission___73da7dcac81a47ddabb1a0e30be3dfdb_ForEach__body(%Callable* %action, %Array* %array) unnamed_addr { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) - %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) - call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 1) - %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* 
%array) - %2 = add i64 %1, -1 - %.not9 = icmp slt i64 %2, 0 - br i1 %.not9, label %exit__1, label %body__1 - -body__1: ; preds = %entry, %exit__4 - %3 = phi i64 [ %32, %exit__4 ], [ 0, %entry ] - %res.010 = phi %Array* [ %14, %exit__4 ], [ %0, %entry ] - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3) - %5 = bitcast i8* %4 to %Qubit** - %item = load %Qubit*, %Qubit** %5, align 8 - %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) - %8 = bitcast i8* %7 to %Result** - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 8) - %10 = bitcast %Tuple* %9 to %Qubit** - store %Qubit* %item, %Qubit** %10, align 8 - %11 = call %Tuple* @__quantum__rt__tuple_create(i64 8) - call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %9, %Tuple* %11) - %12 = bitcast %Tuple* %11 to %Result** - %13 = load %Result*, %Result** %12, align 8 - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) - store %Result* %13, %Result** %8, align 8 - %14 = call %Array* @__quantum__rt__array_concatenate(%Array* %res.010, %Array* %6) - %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) - %16 = add i64 %15, -1 - %.not57 = icmp slt i64 %16, 0 - br i1 %.not57, label %exit__2, label %body__2 - -exit__1: ; preds = %exit__4, %entry - %res.0.lcssa = phi %Array* [ %0, %entry ], [ %14, %exit__4 ] - call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %res.0.lcssa, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) - ret %Array* %res.0.lcssa - -body__2: ; preds = %body__1, %body__2 - %17 = 
phi i64 [ %21, %body__2 ], [ 0, %body__1 ] - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %17) - %19 = bitcast i8* %18 to %Result** - %20 = load %Result*, %Result** %19, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %20, i32 1) - %21 = add i64 %17, 1 - %.not5 = icmp sgt i64 %21, %16 - br i1 %.not5, label %exit__2, label %body__2 - -exit__2: ; preds = %body__2, %body__1 - call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %res.010, i32 -1) - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) - %23 = bitcast i8* %22 to %Result** - %24 = load %Result*, %Result** %23, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %24, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) - %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %res.010) - %26 = add i64 %25, -1 - %.not68 = icmp slt i64 %26, 0 - br i1 %.not68, label %exit__4, label %body__4 - -body__4: ; preds = %exit__2, %body__4 - %27 = phi i64 [ %31, %body__4 ], [ 0, %exit__2 ] - %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %res.010, i64 %27) - %29 = bitcast i8* %28 to %Result** - %30 = load %Result*, %Result** %29, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %30, i32 -1) - %31 = add i64 %27, 1 - %.not6 = icmp sgt i64 %31, %26 - br i1 %.not6, label %exit__4, label %body__4 - -exit__4: ; preds = %body__4, %exit__2 - call void @__quantum__rt__array_update_reference_count(%Array* %res.010, i32 -1) - %32 = add i64 %3, 1 - %.not = icmp sgt i64 %32, %2 - br i1 %.not, label %exit__1, label %body__1 -} - -define internal void @Microsoft__Quantum__Qir__Emission__M__body__wrapper(%Tuple* nocapture readnone 
%capture-tuple, %Tuple* nocapture readonly %arg-tuple, %Tuple* nocapture %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to %Qubit** - %1 = load %Qubit*, %Qubit** %0, align 8 - %2 = call fastcc %Result* @Microsoft__Quantum__Qir__Emission__M__body(%Qubit* %1) - %3 = bitcast %Tuple* %result-tuple to %Result** - store %Result* %2, %Result** %3, align 8 - ret void -} - -declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) local_unnamed_addr - -declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) local_unnamed_addr - -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) local_unnamed_addr - -declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) local_unnamed_addr - -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) local_unnamed_addr - -declare %Array* @__quantum__rt__array_create_1d(i32, i64) local_unnamed_addr - -declare void @__quantum__rt__array_update_reference_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) local_unnamed_addr - -declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) local_unnamed_addr - -declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr -define { i64, i8* }* @Microsoft__Quantum__Qir__Emission__RunAdder__Interop() local_unnamed_addr #0 { +define i64 @Example__Main__Interop() local_unnamed_addr #1 { entry: - %0 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() - %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) - %2 = call i8* @__quantum__rt__memory_allocate(i64 %1) - %3 = ptrtoint i8* %2 to i64 - %4 = add i64 %1, -1 - %.not5 = icmp slt 
i64 %4, 0 - br i1 %.not5, label %exit__1, label %body__1 - -body__1: ; preds = %entry, %body__1 - %5 = phi i64 [ %14, %body__1 ], [ 0, %entry ] - %6 = add i64 %5, %3 - %7 = inttoptr i64 %6 to i8* - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) - %9 = bitcast i8* %8 to %Result** - %10 = load %Result*, %Result** %9, align 8 - %11 = call %Result* @__quantum__rt__result_get_zero() - %12 = call i1 @__quantum__rt__result_equal(%Result* %10, %Result* %11) - %not. = xor i1 %12, true - %13 = sext i1 %not. to i8 - store i8 %13, i8* %7, align 1 - %14 = add i64 %5, 1 - %.not = icmp sgt i64 %14, %4 - br i1 %.not, label %exit__1, label %body__1 - -exit__1: ; preds = %body__1, %entry - %15 = call i8* @__quantum__rt__memory_allocate(i64 16) - %16 = bitcast i8* %15 to i64* - store i64 %1, i64* %16, align 4 - %17 = getelementptr i8, i8* %15, i64 8 - %18 = bitcast i8* %17 to i8** - store i8* %2, i8** %18, align 8 - %.not34 = icmp slt i64 %4, 0 - br i1 %.not34, label %exit__2, label %body__2 - -body__2: ; preds = %exit__1, %body__2 - %19 = phi i64 [ %23, %body__2 ], [ 0, %exit__1 ] - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %19) - %21 = bitcast i8* %20 to %Result** - %22 = load %Result*, %Result** %21, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %22, i32 -1) - %23 = add i64 %19, 1 - %.not3 = icmp sgt i64 %23, %4 - br i1 %.not3, label %exit__2, label %body__2 - -exit__2: ; preds = %body__2, %exit__1 - %24 = bitcast i8* %15 to { i64, i8* }* - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) - ret { i64, i8* }* %24 + call fastcc void @Example__Main__body() + ret i64 0 } -declare i8* @__quantum__rt__memory_allocate(i64) local_unnamed_addr - -declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr - -define void @Microsoft__Quantum__Qir__Emission__RunAdder() local_unnamed_addr #1 
{ +define void @Example__Main() local_unnamed_addr #2 { entry: - %0 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() - %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) - %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @1, i64 0, i64 0)) - call void @__quantum__rt__string_update_reference_count(%String* %2, i32 1) - %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) - %4 = add i64 %3, -1 - %.not7 = icmp slt i64 %4, 0 - br i1 %.not7, label %exit__1, label %body__1 - -body__1: ; preds = %entry, %condContinue__1 - %5 = phi i64 [ %14, %condContinue__1 ], [ 0, %entry ] - %6 = phi %String* [ %13, %condContinue__1 ], [ %2, %entry ] - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) - %8 = bitcast i8* %7 to %Result** - %9 = load %Result*, %Result** %8, align 8 - %.not5 = icmp eq %String* %6, %2 - br i1 %.not5, label %condContinue__1, label %condTrue__1 - -condTrue__1: ; preds = %body__1 - %10 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %1) - call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) - br label %condContinue__1 - -condContinue__1: ; preds = %condTrue__1, %body__1 - %11 = phi %String* [ %10, %condTrue__1 ], [ %6, %body__1 ] - %12 = call %String* @__quantum__rt__result_to_string(%Result* %9) - %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) - call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) - %14 = add i64 %5, 1 - %.not = icmp sgt i64 %14, %4 - br i1 %.not, label %exit__1, label %body__1 - -exit__1: ; preds = %condContinue__1, %entry - %.lcssa = phi %String* [ %2, %entry ], [ %13, %condContinue__1 ] - %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* 
@2, i64 0, i64 0)) - %16 = call %String* @__quantum__rt__string_concatenate(%String* %.lcssa, %String* %15) - call void @__quantum__rt__string_update_reference_count(%String* %.lcssa, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) - call void @__quantum__rt__message(%String* %16) - %.not46 = icmp slt i64 %4, 0 - br i1 %.not46, label %exit__2, label %body__2 - -body__2: ; preds = %exit__1, %body__2 - %17 = phi i64 [ %21, %body__2 ], [ 0, %exit__1 ] - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %17) - %19 = bitcast i8* %18 to %Result** - %20 = load %Result*, %Result** %19, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %20, i32 -1) - %21 = add i64 %17, 1 - %.not4 = icmp sgt i64 %21, %4 - br i1 %.not4, label %exit__2, label %body__2 - -exit__2: ; preds = %body__2, %exit__1 - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) ret void } declare void @__quantum__rt__message(%String*) local_unnamed_addr -declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr - -declare %String* @__quantum__rt__string_concatenate(%String*, %String*) local_unnamed_addr - -declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } +attributes #0 = { norecurse nounwind readnone } +attributes #1 = { "InteropFriendly" } +attributes #2 = { 
"EntryPoint" } diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp new file mode 100644 index 0000000000..7508c586fa --- /dev/null +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "InstructionReplacement/InstructionReplacement.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function, + llvm::FunctionAnalysisManager & /*fam*/) +{ + // Pass body + bool changed{false}; + for (auto &basic_block : function) + { + + instruction_stack_.clear(); + for (auto &instr : basic_block) + { + instruction_stack_.push_back(&instr); + if (match()) + { + changed = true; + std::cout << "FOUND REPLACEMENT" << std::endl; + } + } + } + + // llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; + + return llvm::PreservedAnalyses::all(); +} + +bool InstructionReplacementPass::isRequired() +{ + return true; +} + +bool InstructionReplacementPass::match() const +{ + for (auto const &pattern : patterns_) + { + if (pattern.match(instruction_stack_)) + { + return true; + } + } + return false; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp new file mode 100644 index 0000000000..6c0dffb1ca --- /dev/null +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp @@ -0,0 +1,53 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+#include "InstructionReplacement/Pattern.hpp" +#include "Llvm.hpp" + +#include + +namespace microsoft { +namespace quantum { + +class InstructionReplacementPass : public llvm::PassInfoMixin +{ +public: + using Instruction = llvm::Instruction; + using Patterns = std::vector; + using InstructionStack = Pattern::InstructionStack; + + InstructionReplacementPass() + { + Pattern pattern; + pattern.addPattern(std::make_unique("__quantum__rt__array_update_alias_count")); + patterns_.emplace_back(std::move(pattern)); + } + + /// Constructors and destructors + /// @{ + InstructionReplacementPass(InstructionReplacementPass const &) = default; + InstructionReplacementPass(InstructionReplacementPass &&) = default; + ~InstructionReplacementPass() = default; + /// @} + + /// Operators + /// @{ + InstructionReplacementPass &operator=(InstructionReplacementPass const &) = default; + InstructionReplacementPass &operator=(InstructionReplacementPass &&) = default; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} + + bool match() const; + +private: + Patterns patterns_; + InstructionStack instruction_stack_{}; +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/LibInstructionReplacement.cpp b/src/Passes/libs/InstructionReplacement/LibInstructionReplacement.cpp new file mode 100644 index 0000000000..a2c14df13e --- /dev/null +++ b/src/Passes/libs/InstructionReplacement/LibInstructionReplacement.cpp @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "InstructionReplacement/InstructionReplacement.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace { +llvm::PassPluginLibraryInfo getInstructionReplacementPluginInfo() +{ + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "InstructionReplacement", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "instruction-replacement") + { + fpm.addPass(InstructionReplacementPass()); + return true; + } + + return false; + }); + }}; +} +} // namespace + +// Interface for loading the plugin +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return getInstructionReplacementPluginInfo(); +} diff --git a/src/Passes/libs/InstructionReplacement/Pattern.cpp b/src/Passes/libs/InstructionReplacement/Pattern.cpp new file mode 100644 index 0000000000..9dd26aa8c2 --- /dev/null +++ b/src/Passes/libs/InstructionReplacement/Pattern.cpp @@ -0,0 +1,36 @@ +#include "InstructionReplacement/Pattern.hpp" + +namespace microsoft { +namespace quantum { + +InstructionPattern::~InstructionPattern() = default; + +CallPattern::CallPattern(String const &name) + : name_{name} +{} +CallPattern::~CallPattern() = default; + +bool CallPattern::match(Instruction *instr) +{ + auto *call_instr = llvm::dyn_cast(instr); + if (call_instr == nullptr) + { + return false; + } + + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + + if (name != name_) + { + return false; + } + + // TODO: Check operands + llvm::errs() << "Found call to " << name << "\n"; + + return true; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/Pattern.hpp b/src/Passes/libs/InstructionReplacement/Pattern.hpp new file mode 100644 index 0000000000..3127600b80 --- /dev/null +++ 
b/src/Passes/libs/InstructionReplacement/Pattern.hpp @@ -0,0 +1,117 @@ +#pragma once +#include "Llvm.hpp" + +#include +namespace microsoft { +namespace quantum { + +class InstructionPattern +{ +public: + using Instruction = llvm::Instruction; + using String = std::string; + class OperandPrototype + { + public: + enum MatchType + { + Any = 0, + ConstantInt, + ConstantFloat, + ConstantBool, + + Register, + NamedRegister, + AnonymousRegister, + + }; + + String name() const + { + return name_; + } + + private: + String name_; + // void * value_{nullptr}; + }; + using Operands = std::vector; + + /// @{ + InstructionPattern() = default; + InstructionPattern(InstructionPattern const &other) = default; + InstructionPattern(InstructionPattern &&other) = default; + InstructionPattern &operator=(InstructionPattern const &other) = default; + InstructionPattern &operator=(InstructionPattern &&other) = default; + /// @} + + virtual ~InstructionPattern(); + /// @{ + virtual bool match(Instruction *instr) = 0; + /// @} + + Operands const &operands() + { + return operands_; + } + +private: + Operands operands_; +}; + +class CallPattern : public InstructionPattern +{ +public: + using String = std::string; + CallPattern(String const &name); + + ~CallPattern() override; + + bool match(Instruction *instr) override; + +private: + String name_{}; +}; + +class Pattern +{ +public: + using Instruction = llvm::Instruction; + using MatchList = std::vector>; + using InstructionStack = std::vector; + + void addPattern(std::unique_ptr &&pattern) + { + patterns_.emplace_back(std::move(pattern)); + } + + bool match(InstructionStack const &stack) const + { + auto a = stack.size(); + auto b = patterns_.size(); + + while (a != 0 && b != 0) + { + --a; + --b; + auto const &s = stack[a]; + auto const &p = patterns_[b]; + if (!p->match(s)) + { + return false; + } + } + + llvm::errs() << "POSSIBLE MATCH\n"; + return true; + } + +private: + MatchList patterns_; +}; + +// Propposed syntax for establishing 
rules +// "name"_rule = ("add"_op(0_o, "value"_any ), +// "sub"_op(2_i32, "name"_reg )) => "noop"_op("value"_any, "name"_reg); +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/SPECIFICATION.md b/src/Passes/libs/InstructionReplacement/SPECIFICATION.md new file mode 100644 index 0000000000..76db543de7 --- /dev/null +++ b/src/Passes/libs/InstructionReplacement/SPECIFICATION.md @@ -0,0 +1 @@ +# {InstructionReplacement} Specification diff --git a/src/Passes/libs/OpsCounter/OpsCounter.cpp b/src/Passes/libs/OpsCounter/OpsCounter.cpp index f642970b1a..56ae30bf9f 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.cpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.cpp @@ -1,80 +1,82 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" - #include "OpsCounter/OpsCounter.hpp" +#include "Llvm.hpp" + #include #include -namespace microsoft +namespace microsoft { +namespace quantum { +OpsCounterAnalytics::Result OpsCounterAnalytics::run(llvm::Function &function, + llvm::FunctionAnalysisManager & /*unused*/) { -namespace quantum -{ - OpsCounterAnalytics::Result OpsCounterAnalytics::run( - llvm::Function& function, - llvm::FunctionAnalysisManager& /*unused*/) + OpsCounterAnalytics::Result opcode_map; + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) { - OpsCounterAnalytics::Result opcode_map; - for (auto& basic_block : function) - { - for (auto& instruction : basic_block) - { - if (instruction.isDebugOrPseudoInst()) - { - continue; - } - auto name = instruction.getOpcodeName(); - - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; - } - } - } - - return opcode_map; - } + /* + TODO(tfr): Enbale in LLVM 12 or later + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + */ - OpsCounterPrinter::OpsCounterPrinter(llvm::raw_ostream& out_stream) - : 
out_stream_(out_stream) - { + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } } + } - llvm::PreservedAnalyses OpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) - { - auto& opcode_map = fam.getResult(function); + return opcode_map; +} - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; +OpsCounterPrinter::OpsCounterPrinter(llvm::raw_ostream &out_stream) + : out_stream_(out_stream) +{} - constexpr auto STR1 = "Opcode"; - constexpr auto STR2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", STR1, STR2); - out_stream_ << "---------------------------" - << "\n"; +llvm::PreservedAnalyses OpsCounterPrinter::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) +{ + auto &opcode_map = fam.getResult(function); - for (auto const& instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); - } - out_stream_ << "---------------------------" - << "\n\n"; + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; - return llvm::PreservedAnalyses::all(); - } + constexpr auto STR1 = "Opcode"; + constexpr auto STR2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", STR1, STR2); + out_stream_ << "---------------------------" + << "\n"; - bool OpsCounterPrinter::isRequired() - { - return true; - } + for (auto const &instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), + instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; + + return llvm::PreservedAnalyses::all(); +} + +bool OpsCounterPrinter::isRequired() +{ + return true; +} - llvm::AnalysisKey OpsCounterAnalytics::Key; +llvm::AnalysisKey 
OpsCounterAnalytics::Key; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 60ef6aaeb7..b24a10927d 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -1,321 +1,320 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" - #include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Llvm.hpp" + #include #include #include -namespace microsoft -{ -namespace quantum -{ +namespace microsoft { +namespace quantum { - bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const& instruction) const - { - // Default is true (i.e. the case of no operands) - bool ret = true; +bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const &instruction) const +{ + // Default is true (i.e. the case of no operands) + bool ret = true; - // Checking that all oprands are constant - for (auto& op : instruction.operands()) - { + // Checking that all oprands are constant + for (auto &op : instruction.operands()) + { - // An operand is constant if its value was previously generated from - // a const expression ... - auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); + // An operand is constant if its value was previously generated from + // a const expression ... + auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); - // ... or if it is just a compile time constant. Note that we - // delibrately only consider integers. We may expand this - // to other constants once we have function support. - auto cst = llvm::dyn_cast(op); - auto is_constant = (cst != nullptr); + // ... or if it is just a compile time constant. 
Note that we + // delibrately only consider integers. We may expand this + // to other constants once we have function support. + auto cst = llvm::dyn_cast(op); + auto is_constant = (cst != nullptr); - ret = ret && (const_arg || is_constant); - } + ret = ret && (const_arg || is_constant); + } - return ret; - } + return ret; +} - void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction& instruction) +void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction &instruction) +{ + // Creating arg dependencies + ArgList all_dependencies{}; + for (auto &op : instruction.operands()) + { + // If the operand has dependecies ... + auto it = constantness_dependencies_.find(op); + if (it != constantness_dependencies_.end()) { - // Creating arg dependencies - ArgList all_dependencies{}; - for (auto& op : instruction.operands()) - { - // If the operand has dependecies ... - auto it = constantness_dependencies_.find(op); - if (it != constantness_dependencies_.end()) - { - // ... we add these as a dependency for the - // resulting instructions value - for (auto& arg : it->second) - { - all_dependencies.insert(arg); - } - } - } - - // Adding full list of dependices to the dependency graph - constantness_dependencies_.insert({&instruction, all_dependencies}); + // ... 
we add these as a dependency for the + // resulting instructions value + for (auto &arg : it->second) + { + all_dependencies.insert(arg); + } } + } - void QubitAllocationAnalysisAnalytics::analyseCall(Instruction& instruction) - { - // Skipping debug code - if (instruction.isDebugOrPseudoInst()) - { - return; - } - - // Recovering the call information - auto* call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - return; - } - - // Getting the name of the function being called - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); + // Adding full list of dependices to the dependency graph + constantness_dependencies_.insert({&instruction, all_dependencies}); +} - // TODO(tfr): Make use of TargetLibraryInfo - if (name != "__quantum__rt__qubit_allocate_array") - { - return; - } - - // We expect only a single argument with the number - // of qubits allocated - if (call_instr->arg_size() != 1) - { - llvm::errs() << "Expected exactly one argument\n"; - return; - } - - // Next we extract the argument ... - auto argument = call_instr->getArgOperand(0); - if (argument == nullptr) - { - llvm::errs() << "Failed getting the size argument\n"; - return; - } - - // ... 
and checks whether it is a result of a dependant - // const expression - auto it = constantness_dependencies_.find(argument); - if (it != constantness_dependencies_.end()) +void QubitAllocationAnalysisAnalytics::analyseCall(Instruction &instruction) +{ + // Skipping debug code + /* + TODO(tfr): Enable this in LLVM 12 and upwards + if (instruction.isDebugOrPseudoInst()) + { + return; + } + */ + + // Recovering the call information + auto *call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + return; + } + + // Getting the name of the function being called + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + + // TODO(tfr): Make use of TargetLibraryInfo + if (name != "__quantum__rt__qubit_allocate_array") + { + return; + } + + // We expect only a single argument with the number + // of qubits allocated + if (call_instr->arg_size() != 1) + { + llvm::errs() << "Expected exactly one argument\n"; + return; + } + + // Next we extract the argument ... + auto argument = call_instr->getArgOperand(0); + if (argument == nullptr) + { + llvm::errs() << "Failed getting the size argument\n"; + return; + } + + // ... 
and checks whether it is a result of a dependant + // const expression + auto it = constantness_dependencies_.find(argument); + if (it != constantness_dependencies_.end()) + { + // If it is, we add the details to the result list + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + qubit_array.depends_on = it->second; + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + return; + } + + // Otherwise, it may be a static allocation based on a constant (or + // folded constant) + auto cst = llvm::dyn_cast(argument); + if (cst != nullptr) + { + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + qubit_array.size = cst->getZExtValue(); + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + + return; + } + + // If neither of the previous is the case, we are dealing with a non-static array + QubitArray qubit_array; + qubit_array.is_possibly_static = false; + qubit_array.variable_name = instruction.getName().str(); + + // Storing the result + results_.push_back(std::move(qubit_array)); +} + +void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function &function) +{ + // Clearing results generated in a previous run + results_.clear(); + constantness_dependencies_.clear(); + + // Creating a list with function arguments + for (auto &arg : function.args()) + { + auto s = arg.getName().str(); + constantness_dependencies_.insert({&arg, {s}}); + } + + // Evaluating all expressions + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) + { + auto opcode = instruction.getOpcode(); + switch (opcode) + { + case llvm::Instruction::Sub: + case llvm::Instruction::Add: + case llvm::Instruction::Mul: + case llvm::Instruction::Shl: + case llvm::Instruction::LShr: + case llvm::Instruction::AShr: + case llvm::Instruction::And: + case llvm::Instruction::Or: + case 
llvm::Instruction::Xor: + if (operandsConstant(instruction)) { - // If it is, we add the details to the result list - QubitArray qubit_array; - qubit_array.is_possibly_static = true; - qubit_array.variable_name = instruction.getName().str(); - qubit_array.depends_on = it->second; - - // Pushing to the result - results_.push_back(std::move(qubit_array)); - return; + markPossibleConstant(instruction); } + break; + case llvm::Instruction::Call: + analyseCall(instruction); + break; + // Unanalysed statements + case llvm::Instruction::Ret: + case llvm::Instruction::Br: + case llvm::Instruction::Switch: + case llvm::Instruction::IndirectBr: + case llvm::Instruction::Invoke: + case llvm::Instruction::Resume: + case llvm::Instruction::Unreachable: + case llvm::Instruction::CleanupRet: + case llvm::Instruction::CatchRet: + case llvm::Instruction::CatchSwitch: + case llvm::Instruction::CallBr: + case llvm::Instruction::FNeg: + case llvm::Instruction::FAdd: + case llvm::Instruction::FSub: + case llvm::Instruction::FMul: + case llvm::Instruction::UDiv: + case llvm::Instruction::SDiv: + case llvm::Instruction::FDiv: + case llvm::Instruction::URem: + case llvm::Instruction::SRem: + case llvm::Instruction::FRem: + case llvm::Instruction::Alloca: + case llvm::Instruction::Load: + case llvm::Instruction::Store: + case llvm::Instruction::GetElementPtr: + case llvm::Instruction::Fence: + case llvm::Instruction::AtomicCmpXchg: + case llvm::Instruction::AtomicRMW: + case llvm::Instruction::Trunc: + case llvm::Instruction::ZExt: + case llvm::Instruction::SExt: + case llvm::Instruction::FPToUI: + case llvm::Instruction::FPToSI: + case llvm::Instruction::UIToFP: + case llvm::Instruction::SIToFP: + case llvm::Instruction::FPTrunc: + case llvm::Instruction::FPExt: + case llvm::Instruction::PtrToInt: + case llvm::Instruction::IntToPtr: + case llvm::Instruction::BitCast: + case llvm::Instruction::AddrSpaceCast: + case llvm::Instruction::CleanupPad: + case llvm::Instruction::CatchPad: + case 
llvm::Instruction::ICmp: + case llvm::Instruction::FCmp: + case llvm::Instruction::PHI: + case llvm::Instruction::Select: + case llvm::Instruction::UserOp1: + case llvm::Instruction::UserOp2: + case llvm::Instruction::VAArg: + case llvm::Instruction::ExtractElement: + case llvm::Instruction::InsertElement: + case llvm::Instruction::ShuffleVector: + case llvm::Instruction::ExtractValue: + case llvm::Instruction::InsertValue: + case llvm::Instruction::LandingPad: + // End of Binary Ops + default: + break; + } + } + } +} - // Otherwise, it may be a static allocation based on a constant (or - // folded constant) - auto cst = llvm::dyn_cast(argument); - if (cst != nullptr) - { - QubitArray qubit_array; - qubit_array.is_possibly_static = true; - qubit_array.variable_name = instruction.getName().str(); - qubit_array.size = cst->getZExtValue(); - - // Pushing to the result - results_.push_back(std::move(qubit_array)); - - return; - } +QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( + llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) +{ + // Running functin analysis + analyseFunction(function); - // If neither of the previous is the case, we are dealing with a non-static array - QubitArray qubit_array; - qubit_array.is_possibly_static = false; - qubit_array.variable_name = instruction.getName().str(); + // ... and return the result. 
+ return results_; +} - // Storing the result - results_.push_back(std::move(qubit_array)); - } +QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream) + : out_stream_(out_stream) +{} - void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function& function) +llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) +{ + auto &results = fam.getResult(function); + + if (!results.empty()) + { + out_stream_ << function.getName() << "\n"; + out_stream_ << "====================" + << "\n\n"; + for (auto const &ret : results) { - // Clearing results generated in a previous run - results_.clear(); - constantness_dependencies_.clear(); - - // Creating a list with function arguments - for (auto& arg : function.args()) + if (!ret.is_possibly_static) + { + out_stream_ << ret.variable_name << " is dynamic.\n"; + } + else + { + if (ret.depends_on.empty()) { - auto s = arg.getName().str(); - constantness_dependencies_.insert({&arg, {s}}); + out_stream_ << ret.variable_name << " is trivially static with " << ret.size + << " qubits."; } - - // Evaluating all expressions - for (auto& basic_block : function) + else { - for (auto& instruction : basic_block) + out_stream_ << ret.variable_name << " depends on "; + bool first = true; + for (auto &x : ret.depends_on) + { + if (!first) { - auto opcode = instruction.getOpcode(); - switch (opcode) - { - case llvm::Instruction::Sub: - case llvm::Instruction::Add: - case llvm::Instruction::Mul: - case llvm::Instruction::Shl: - case llvm::Instruction::LShr: - case llvm::Instruction::AShr: - case llvm::Instruction::And: - case llvm::Instruction::Or: - case llvm::Instruction::Xor: - if (operandsConstant(instruction)) - { - markPossibleConstant(instruction); - } - break; - case llvm::Instruction::Call: - analyseCall(instruction); - break; - // Unanalysed statements - case llvm::Instruction::Ret: - case llvm::Instruction::Br: - case 
llvm::Instruction::Switch: - case llvm::Instruction::IndirectBr: - case llvm::Instruction::Invoke: - case llvm::Instruction::Resume: - case llvm::Instruction::Unreachable: - case llvm::Instruction::CleanupRet: - case llvm::Instruction::CatchRet: - case llvm::Instruction::CatchSwitch: - case llvm::Instruction::CallBr: - case llvm::Instruction::FNeg: - case llvm::Instruction::FAdd: - case llvm::Instruction::FSub: - case llvm::Instruction::FMul: - case llvm::Instruction::UDiv: - case llvm::Instruction::SDiv: - case llvm::Instruction::FDiv: - case llvm::Instruction::URem: - case llvm::Instruction::SRem: - case llvm::Instruction::FRem: - case llvm::Instruction::Alloca: - case llvm::Instruction::Load: - case llvm::Instruction::Store: - case llvm::Instruction::GetElementPtr: - case llvm::Instruction::Fence: - case llvm::Instruction::AtomicCmpXchg: - case llvm::Instruction::AtomicRMW: - case llvm::Instruction::Trunc: - case llvm::Instruction::ZExt: - case llvm::Instruction::SExt: - case llvm::Instruction::FPToUI: - case llvm::Instruction::FPToSI: - case llvm::Instruction::UIToFP: - case llvm::Instruction::SIToFP: - case llvm::Instruction::FPTrunc: - case llvm::Instruction::FPExt: - case llvm::Instruction::PtrToInt: - case llvm::Instruction::IntToPtr: - case llvm::Instruction::BitCast: - case llvm::Instruction::AddrSpaceCast: - case llvm::Instruction::CleanupPad: - case llvm::Instruction::CatchPad: - case llvm::Instruction::ICmp: - case llvm::Instruction::FCmp: - case llvm::Instruction::PHI: - case llvm::Instruction::Select: - case llvm::Instruction::UserOp1: - case llvm::Instruction::UserOp2: - case llvm::Instruction::VAArg: - case llvm::Instruction::ExtractElement: - case llvm::Instruction::InsertElement: - case llvm::Instruction::ShuffleVector: - case llvm::Instruction::ExtractValue: - case llvm::Instruction::InsertValue: - case llvm::Instruction::LandingPad: - // End of Binary Ops - default: - break; - } + out_stream_ << ", "; } + out_stream_ << x; + first = false; + } 
+ out_stream_ << " being constant to be static."; } - } + } - QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( - llvm::Function& function, - llvm::FunctionAnalysisManager& /*unused*/) - { - // Running functin analysis - analyseFunction(function); - - // ... and return the result. - return results_; - } - - QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream) - : out_stream_(out_stream) - { + out_stream_ << "\n"; } + } - llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run( - llvm::Function& function, - llvm::FunctionAnalysisManager& fam) - { - auto& results = fam.getResult(function); + return llvm::PreservedAnalyses::all(); +} - if (!results.empty()) - { - out_stream_ << function.getName() << "\n"; - out_stream_ << "====================" - << "\n\n"; - for (auto const& ret : results) - { - if (!ret.is_possibly_static) - { - out_stream_ << ret.variable_name << " is dynamic.\n"; - } - else - { - if (ret.depends_on.empty()) - { - out_stream_ << ret.variable_name << " is trivially static with " << ret.size << " qubits."; - } - else - { - out_stream_ << ret.variable_name << " depends on "; - bool first = true; - for (auto& x : ret.depends_on) - { - if (!first) - { - out_stream_ << ", "; - } - out_stream_ << x; - first = false; - } - out_stream_ << " being constant to be static."; - } - } - - out_stream_ << "\n"; - } - } - - return llvm::PreservedAnalyses::all(); - } - - bool QubitAllocationAnalysisPrinter::isRequired() - { - return true; - } +bool QubitAllocationAnalysisPrinter::isRequired() +{ + return true; +} - llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; +llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft From 29b41e055b8bf4593564b16bb11b3d60c5398e0c Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 3 Aug 2021 13:55:35 +0200 Subject: [PATCH 050/106] Creating initial tree matching algorithm --- .../InstructionReplacement.cpp | 15 ++- .../InstructionReplacement.hpp | 3 +- .../libs/InstructionReplacement/Pattern.cpp | 40 ++++++- .../libs/InstructionReplacement/Pattern.hpp | 102 ++++++------------ 4 files changed, 82 insertions(+), 78 deletions(-) diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp index 7508c586fa..12e36ef802 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp @@ -21,11 +21,16 @@ llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function instruction_stack_.clear(); for (auto &instr : basic_block) { - instruction_stack_.push_back(&instr); - if (match()) + // instruction_stack_.push_back(); + if (match(&instr)) { changed = true; - std::cout << "FOUND REPLACEMENT" << std::endl; + std::cout << "FOUND REPLACEMENT: " << instr.getNumOperands() << std::endl; + llvm::errs() << instr << "\n"; + for (uint32_t i = 0; i < instr.getNumOperands(); ++i) + { + llvm::errs() << " - " << (*instr.getOperand(i)) << "\n"; + } } } } @@ -40,11 +45,11 @@ bool InstructionReplacementPass::isRequired() return true; } -bool InstructionReplacementPass::match() const +bool InstructionReplacementPass::match(Value *value) const { for (auto const &pattern : patterns_) { - if (pattern.match(instruction_stack_)) + if (pattern.match(value)) { return true; } diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp index 6c0dffb1ca..7d18de132f 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp @@ -15,6 +15,7 @@ class InstructionReplacementPass : public 
llvm::PassInfoMixin; using InstructionStack = Pattern::InstructionStack; + using Value = llvm::Value; InstructionReplacementPass() { @@ -42,7 +43,7 @@ class InstructionReplacementPass : public llvm::PassInfoMixin(instr); if (call_instr == nullptr) @@ -26,8 +24,40 @@ bool CallPattern::match(Instruction *instr) return false; } - // TODO: Check operands - llvm::errs() << "Found call to " << name << "\n"; + return matchChildren(instr); +} + +OperandPrototype::~OperandPrototype() = default; +bool OperandPrototype::matchChildren(Value *value) const +{ + if (!children_.empty()) + { + auto user = llvm::dyn_cast(value); + if (user == nullptr) + { + return false; + } + + if (user->getNumOperands() != children_.size()) + { + return false; + } + + uint64_t i = 0; + while (i < children_.size()) + { + auto v = user->getOperand(static_cast(i)); + if (!children_[i]->match(v)) + { + return false; + } + ++i; + } + + return true; + } + + // TODO: Check other possibilities for value return true; } diff --git a/src/Passes/libs/InstructionReplacement/Pattern.hpp b/src/Passes/libs/InstructionReplacement/Pattern.hpp index 3127600b80..b75d74086b 100644 --- a/src/Passes/libs/InstructionReplacement/Pattern.hpp +++ b/src/Passes/libs/InstructionReplacement/Pattern.hpp @@ -5,61 +5,41 @@ namespace microsoft { namespace quantum { -class InstructionPattern +class OperandPrototype { public: using Instruction = llvm::Instruction; using String = std::string; - class OperandPrototype + using Value = llvm::Value; + using Child = std::shared_ptr; + using Children = std::vector; + OperandPrototype(bool capture = false, std::string const &capture_name = "") + : capture_{capture} + , capture_name_{capture_name} + {} + virtual ~OperandPrototype(); + virtual bool match(Value *value) const = 0; + + bool capture() const { - public: - enum MatchType - { - Any = 0, - ConstantInt, - ConstantFloat, - ConstantBool, - - Register, - NamedRegister, - AnonymousRegister, - - }; - - String name() const - { - return 
name_; - } - - private: - String name_; - // void * value_{nullptr}; - }; - using Operands = std::vector; - - /// @{ - InstructionPattern() = default; - InstructionPattern(InstructionPattern const &other) = default; - InstructionPattern(InstructionPattern &&other) = default; - InstructionPattern &operator=(InstructionPattern const &other) = default; - InstructionPattern &operator=(InstructionPattern &&other) = default; - /// @} - - virtual ~InstructionPattern(); - /// @{ - virtual bool match(Instruction *instr) = 0; - /// @} + return capture_; + } - Operands const &operands() + void addChild(Child const &child) { - return operands_; + children_.push_back(child); } +protected: + bool matchChildren(Value *value) const; + private: - Operands operands_; + bool capture_{false}; + std::string capture_name_{""}; + Children children_{}; }; -class CallPattern : public InstructionPattern +class CallPattern : public OperandPrototype { public: using String = std::string; @@ -67,7 +47,7 @@ class CallPattern : public InstructionPattern ~CallPattern() override; - bool match(Instruction *instr) override; + bool match(Value *instr) const override; private: String name_{}; @@ -76,38 +56,26 @@ class CallPattern : public InstructionPattern class Pattern { public: - using Instruction = llvm::Instruction; - using MatchList = std::vector>; - using InstructionStack = std::vector; - - void addPattern(std::unique_ptr &&pattern) + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using InstructionStack = std::vector; + using OperandPrototypePtr = std::shared_ptr; + void addPattern(OperandPrototypePtr &&pattern) { - patterns_.emplace_back(std::move(pattern)); + pattern_ = std::move(pattern); } - bool match(InstructionStack const &stack) const + bool match(Value *value) const { - auto a = stack.size(); - auto b = patterns_.size(); - - while (a != 0 && b != 0) + if (pattern_ == nullptr) { - --a; - --b; - auto const &s = stack[a]; - auto const &p = patterns_[b]; - if 
(!p->match(s)) - { - return false; - } + return false; } - - llvm::errs() << "POSSIBLE MATCH\n"; - return true; + return pattern_->match(value); } private: - MatchList patterns_; + OperandPrototypePtr pattern_{nullptr}; }; // Propposed syntax for establishing rules From ed0461318ebeaf45b307264293943e385504f697 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 3 Aug 2021 16:17:43 +0200 Subject: [PATCH 051/106] Finishing access analysis --- .../ConstSizeArray/ConstSizeArray.qs | 11 +- .../analysis-example.ll | 85 ------------ .../InstructionReplacement.cpp | 22 ++- .../InstructionReplacement.hpp | 48 +++++-- .../libs/InstructionReplacement/Pattern.cpp | 129 ++++++++++++++---- .../libs/InstructionReplacement/Pattern.hpp | 86 +++++++++--- 6 files changed, 221 insertions(+), 160 deletions(-) delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index 99fe9b8162..543bc2eb6c 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,13 +1,15 @@ namespace Example { + open Microsoft.Quantum.Intrinsic; + @EntryPoint() operation Main() : Int { QuantumProgram(3,2,1); - QuantumProgram(4,X(2),4); + QuantumProgram(4,Xx(2),4); return 0; } - function X(value: Int): Int + function Xx(value: Int): Int { return 3 * value; } @@ -20,7 +22,10 @@ namespace Example { use qubits1 = Qubit[(y - 2)/2-z]; use qubits2 = Qubit[y - g]; use qubits3 = Qubit[h]; - use qubits4 = Qubit[X(x)]; + use qubits4 = Qubit[Xx(x)]; + X(qubits0[1]); + X(qubits0[2]); + X(qubits2[0]); } } \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll deleted file mode 100644 index 
a76121d5cb..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ /dev/null @@ -1,85 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumProgram__body(i64 3, i64 2, i64 1) - %0 = call fastcc i64 @Example__X__body(i64 2) - call fastcc void @Example__QuantumProgram__body(i64 4, i64 %0, i64 4) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body(i64 %x, i64 %h, i64 %g) unnamed_addr { -entry: - %.neg = xor i64 %x, -1 - %.neg1 = mul i64 %.neg, %x - %z.neg = add i64 %.neg1, 47 - %y = mul i64 %x, 3 - %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) - %0 = add i64 %y, -2 - %1 = lshr i64 %0, 1 - %2 = add i64 %z.neg, %1 - %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) - %3 = sub i64 %y, %g - %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) - %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %h) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) - %4 = call fastcc i64 @Example__X__body(i64 %x) - %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 
-1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits0) - ret void -} - -; Function Attrs: norecurse nounwind readnone -define internal fastcc i64 @Example__X__body(i64 %value) unnamed_addr #0 { -entry: - %0 = mul i64 %value, 3 - ret i64 %0 -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #2 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { norecurse nounwind readnone } -attributes #1 = { "InteropFriendly" } -attributes #2 = { "EntryPoint" } diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp index 12e36ef802..876057d8a9 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp @@ -17,20 +17,12 @@ llvm::PreservedAnalyses 
InstructionReplacementPass::run(llvm::Function &function bool changed{false}; for (auto &basic_block : function) { - - instruction_stack_.clear(); for (auto &instr : basic_block) { // instruction_stack_.push_back(); - if (match(&instr)) + if (matchAndReplace(&instr)) { changed = true; - std::cout << "FOUND REPLACEMENT: " << instr.getNumOperands() << std::endl; - llvm::errs() << instr << "\n"; - for (uint32_t i = 0; i < instr.getNumOperands(); ++i) - { - llvm::errs() << " - " << (*instr.getOperand(i)) << "\n"; - } } } } @@ -45,13 +37,17 @@ bool InstructionReplacementPass::isRequired() return true; } -bool InstructionReplacementPass::match(Value *value) const +bool InstructionReplacementPass::matchAndReplace(Value *value) const { - for (auto const &pattern : patterns_) + Captures captures; + for (auto const &rule : rules_) { - if (pattern.match(value)) + if (rule.match(value, captures)) { - return true; + if (rule.replace(value, captures)) + { + return true; + } } } return false; diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp index 7d18de132f..8777db1b59 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp @@ -12,16 +12,45 @@ namespace quantum { class InstructionReplacementPass : public llvm::PassInfoMixin { public: - using Instruction = llvm::Instruction; - using Patterns = std::vector; - using InstructionStack = Pattern::InstructionStack; - using Value = llvm::Value; + using Captures = OperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Rules = std::vector; + using Value = llvm::Value; InstructionReplacementPass() { - Pattern pattern; - pattern.addPattern(std::make_unique("__quantum__rt__array_update_alias_count")); - patterns_.emplace_back(std::move(pattern)); + + auto array_name = std::make_shared(); + auto index = std::make_shared(); + 
array_name->enableCapture("arrayName"); + index->enableCapture("index"); + + auto get_element = std::make_shared("__quantum__rt__array_get_element_ptr_1d"); + get_element->addChild(array_name); + get_element->addChild(index); + // Function name is last arg? + get_element->addChild(std::make_shared()); + + auto load_pattern = std::make_shared(); + auto cast_pattern = std::make_shared(); + + cast_pattern->addChild(get_element); + load_pattern->addChild(cast_pattern); + + ReplacementRule rule1; + rule1.setPattern(load_pattern); + rule1.setReplacer([](Value *val, Captures &cap) { + llvm::errs() << "Found qubit load access operator " << val->getName() << " = " + << cap["arrayName"]->getName() << "[" << *cap["index"] << "]\n"; + return true; + }); + rules_.emplace_back(std::move(rule1)); + + /* + ReplacementRule rule2; + rule2.setPattern(get_element); + rules_.emplace_back(std::move(rule2)); + */ } /// Constructors and destructors @@ -43,11 +72,10 @@ class InstructionReplacementPass : public llvm::PassInfoMixin(instr); - if (call_instr == nullptr) - { - return false; - } - - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); - - if (name != name_) - { - return false; - } - - return matchChildren(instr); -} - OperandPrototype::~OperandPrototype() = default; -bool OperandPrototype::matchChildren(Value *value) const +bool OperandPrototype::matchChildren(Value *value, Captures &captures) const { + auto user = llvm::dyn_cast(value); if (!children_.empty()) { - auto user = llvm::dyn_cast(value); + if (user == nullptr) { return false; @@ -47,20 +24,118 @@ bool OperandPrototype::matchChildren(Value *value) const while (i < children_.size()) { auto v = user->getOperand(static_cast(i)); - if (!children_[i]->match(v)) + if (!children_[i]->match(v, captures)) { return false; } ++i; } + // llvm::errs() << "SUCCESS MATCH: " << *value << "\n"; + return true; } + // llvm::errs() << "SUCCESS MATCH: " << *value << " " << 
user->getNumOperands() << "\n"; // TODO: Check other possibilities for value return true; } +bool OperandPrototype::fail(Value * /*value*/, Captures & /*captures*/) const +{ + return false; +} + +bool OperandPrototype::success(Value *value, Captures &captures) const +{ + capture(value, captures); + + auto ret = matchChildren(value, captures); + if (!ret) + { + uncapture(value, captures); + } + return ret; +} + +void OperandPrototype::capture(Value *value, Captures &captures) const +{ + if (!capture_name_.empty()) + { + captures[capture_name_] = value; + } +} + +void OperandPrototype::uncapture(Value * /*value*/, Captures &captures) const +{ + if (!capture_name_.empty()) + { + captures.erase(captures.find(capture_name_)); + } +} + +CallPattern::CallPattern(String const &name) + : name_{name} +{} + +CallPattern::~CallPattern() = default; + +bool CallPattern::match(Value *instr, Captures &captures) const +{ + auto *call_instr = llvm::dyn_cast(instr); + if (call_instr == nullptr) + { + return fail(instr, captures); + } + + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + + if (name != name_) + { + return fail(instr, captures); + } + + return success(instr, captures); +} + +AnyPattern::AnyPattern() = default; +AnyPattern::~AnyPattern() = default; +bool AnyPattern::match(Value *instr, Captures &captures) const +{ + return success(instr, captures); +} + +template +InstructionPattern::~InstructionPattern() = default; +template +bool InstructionPattern::match(Value *instr, Captures &captures) const +{ + auto *load_instr = llvm::dyn_cast(instr); + if (load_instr == nullptr) + { + return fail(instr, captures); + } + + return success(instr, captures); +} + +// TODO(tfr): This seems to be a bug in LLVM. Template instantiations in +// a single translation unit is not supposed to reinstantiate across other +// translation units. +// +// However, it is suspecious that htis problem has been around since Clang 8. 
+// so this needs more investigation. For now, this work around suffices +// See +// https://bugs.llvm.org/show_bug.cgi?id=18733 +// https://stackoverflow.com/questions/56041900/why-does-explicit-template-instantiation-result-in-weak-template-vtables-warning +// for more information +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wweak-template-vtables" +template class InstructionPattern; +template class InstructionPattern; +#pragma clang diagnostic pop + } // namespace quantum } // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/Pattern.hpp b/src/Passes/libs/InstructionReplacement/Pattern.hpp index b75d74086b..2684ef286d 100644 --- a/src/Passes/libs/InstructionReplacement/Pattern.hpp +++ b/src/Passes/libs/InstructionReplacement/Pattern.hpp @@ -1,44 +1,56 @@ #pragma once #include "Llvm.hpp" +#include #include + namespace microsoft { namespace quantum { class OperandPrototype { public: - using Instruction = llvm::Instruction; - using String = std::string; - using Value = llvm::Value; - using Child = std::shared_ptr; - using Children = std::vector; - OperandPrototype(bool capture = false, std::string const &capture_name = "") - : capture_{capture} - , capture_name_{capture_name} - {} + using Instruction = llvm::Instruction; + using String = std::string; + using Value = llvm::Value; + using Child = std::shared_ptr; + using Children = std::vector; + using Captures = std::unordered_map; + OperandPrototype() = default; virtual ~OperandPrototype(); - virtual bool match(Value *value) const = 0; + virtual bool match(Value *value, Captures &captures) const = 0; - bool capture() const + void addChild(Child const &child) { - return capture_; + children_.push_back(child); } - void addChild(Child const &child) + void enableCapture(std::string capture_name) { - children_.push_back(child); + capture_name_ = capture_name; } protected: - bool matchChildren(Value *value) const; + bool fail(Value *value, Captures &captures) const; + bool 
success(Value *value, Captures &captures) const; private: - bool capture_{false}; + bool matchChildren(Value *value, Captures &captures) const; + void capture(Value *value, Captures &captures) const; + void uncapture(Value *value, Captures &captures) const; + std::string capture_name_{""}; Children children_{}; }; +class AnyPattern : public OperandPrototype +{ +public: + AnyPattern(); + ~AnyPattern() override; + bool match(Value *instr, Captures &captures) const override; +}; + class CallPattern : public OperandPrototype { public: @@ -47,35 +59,65 @@ class CallPattern : public OperandPrototype ~CallPattern() override; - bool match(Value *instr) const override; + bool match(Value *instr, Captures &captures) const override; private: String name_{}; }; -class Pattern +template +class InstructionPattern : public OperandPrototype { public: + using OperandPrototype::OperandPrototype; + ~InstructionPattern() override; + bool match(Value *instr, Captures &captures) const override; +}; + +using LoadPattern = InstructionPattern; +using BitCastPattern = InstructionPattern; + +class ReplacementRule +{ +public: + using Captures = OperandPrototype::Captures; using Instruction = llvm::Instruction; using Value = llvm::Value; - using InstructionStack = std::vector; using OperandPrototypePtr = std::shared_ptr; - void addPattern(OperandPrototypePtr &&pattern) + using ReplaceFunction = std::function; + + void setPattern(OperandPrototypePtr &&pattern) { pattern_ = std::move(pattern); } - bool match(Value *value) const + void setReplacer(ReplaceFunction const &replacer) + { + replacer_ = replacer; + } + + bool match(Value *value, Captures &captures) const { if (pattern_ == nullptr) { return false; } - return pattern_->match(value); + return pattern_->match(value, captures); + } + + bool replace(Value *value, Captures &captures) const + { + if (replacer_) + { + return replacer_(value, captures); + } + + return false; } private: OperandPrototypePtr pattern_{nullptr}; + ReplaceFunction 
replacer_{nullptr}; }; // Propposed syntax for establishing rules From f3f8810c3e2e8a52d69770fe26bd2dd5b76b0e4f Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 4 Aug 2021 14:11:00 +0200 Subject: [PATCH 052/106] Implementing the replacements --- .../ConstSizeArray/ConstSizeArray.qs | 35 ++---- .../examples/QubitAllocationAnalysis/Makefile | 2 +- .../analysis-example.ll | 77 ++++++++++++ .../InstructionReplacement.cpp | 53 +++++++- .../InstructionReplacement.hpp | 118 +++++++++++++++--- .../libs/InstructionReplacement/Pattern.cpp | 1 - .../libs/InstructionReplacement/Pattern.hpp | 9 +- .../QubitAllocationManager.hpp | 81 ++++++++++++ 8 files changed, 329 insertions(+), 47 deletions(-) create mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll create mode 100644 src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index 543bc2eb6c..947ffc7f2e 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,31 +1,22 @@ namespace Example { + open Microsoft.Quantum.Measurement; open Microsoft.Quantum.Intrinsic; - + open Microsoft.Quantum.Canon; + @EntryPoint() operation Main() : Int { - QuantumProgram(3,2,1); - QuantumProgram(4,Xx(2),4); - return 0; - } + use qubits2 = Qubit[3]; + use qubits1 = Qubit[3]; - function Xx(value: Int): Int - { - return 3 * value; - } + X(qubits1[0]); + X(qubits1[1]); + X(qubits1[2]); - operation QuantumProgram(x: Int, h: Int, g: Int) : Unit { - let z = x * (x + 1) - 47; - let y = 3 * x; - - use qubits0 = Qubit[9]; - use qubits1 = Qubit[(y - 2)/2-z]; - use qubits2 = Qubit[y - g]; - use qubits3 = Qubit[h]; - use qubits4 = Qubit[Xx(x)]; - - X(qubits0[1]); - X(qubits0[2]); X(qubits2[0]); + X(qubits2[1]); + 
X(qubits2[2]); + + return 0; } -} \ No newline at end of file +} diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index d739337e0a..2c3ce56f1a 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -7,7 +7,7 @@ run: build-qaa analysis-example.ll opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll run-replace: build-ir analysis-example.ll - opt -load-pass-plugin ../../Debug/libs/libInstructionReplacement.dylib --passes="instruction-replacement" -disable-output analysis-example.ll + opt -load-pass-plugin ../../Debug/libs/libInstructionReplacement.dylib --passes="instruction-replacement" -S analysis-example.ll build-prepare: diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll new file mode 100644 index 0000000000..be993311fa --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -0,0 +1,77 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%Qubit = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits1, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits1, i64 1) + %3 = bitcast i8* %2 to %Qubit** + 
%qubit__1 = load %Qubit*, %Qubit** %3, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit__1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits1, i64 2) + %5 = bitcast i8* %4 to %Qubit** + %qubit__2 = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit__2) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits2, i64 0) + %7 = bitcast i8* %6 to %Qubit** + %qubit__3 = load %Qubit*, %Qubit** %7, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit__3) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits2, i64 1) + %9 = bitcast i8* %8 to %Qubit** + %qubit__4 = load %Qubit*, %Qubit** %9, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit__4) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits2, i64 2) + %11 = bitcast i8* %10 to %Qubit** + %qubit__5 = load %Qubit*, %Qubit** %11, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit__5) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr + +declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call 
fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp index 876057d8a9..137f873900 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp @@ -13,13 +13,37 @@ namespace quantum { llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function, llvm::FunctionAnalysisManager & /*fam*/) { + replacements_.clear(); + // Pass body bool changed{false}; for (auto &basic_block : function) { for (auto &instr : basic_block) { - // instruction_stack_.push_back(); + /* + llvm::errs() << instr << "\n"; + for (uint32_t i = 0; i < instr.getNumOperands(); ++i) + { + auto op = llvm::dyn_cast(instr.getOperand(i)); + if (op == nullptr) + { + continue; + } + llvm::errs() << " - " << *op << "\n"; + for (uint32_t j = 0; j < op->getNumOperands(); ++j) + { + auto x = op->getOperand(j); + if (x == nullptr) + { + continue; + } + llvm::errs() << " * " << *x << "\n"; + } + } + + llvm::errs() << "\n\n"; + */ if (matchAndReplace(&instr)) { changed = true; @@ -27,9 +51,29 @@ llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function } } + llvm::errs() << "REPLACEMENTS!" 
<< this << "\n"; + + for (auto it = replacements_.rbegin(); it != replacements_.rend(); ++it) + { + if (it->second != nullptr) + { + llvm::errs() << "Replacing " << *it->first; + llvm::ReplaceInstWithInst(it->first, it->second); + llvm::errs() << " with " << *it->second << "\n"; + } + else + { + auto instruction = it->first; + if (!instruction->use_empty()) + { + instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); + } + instruction->eraseFromParent(); + } + } // llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; - return llvm::PreservedAnalyses::all(); + return llvm::PreservedAnalyses::none(); } bool InstructionReplacementPass::isRequired() @@ -37,14 +81,15 @@ bool InstructionReplacementPass::isRequired() return true; } -bool InstructionReplacementPass::matchAndReplace(Value *value) const +bool InstructionReplacementPass::matchAndReplace(Instruction *value) { Captures captures; for (auto const &rule : rules_) { if (rule.match(value, captures)) { - if (rule.replace(value, captures)) + llvm::IRBuilder<> builder{value}; + if (rule.replace(builder, value, captures, replacements_)) { return true; } diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp index 8777db1b59..f9d0b3814c 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp @@ -2,6 +2,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
#include "InstructionReplacement/Pattern.hpp" +#include "InstructionReplacement/QubitAllocationManager.hpp" #include "Llvm.hpp" #include @@ -12,13 +13,18 @@ namespace quantum { class InstructionReplacementPass : public llvm::PassInfoMixin { public: - using Captures = OperandPrototype::Captures; - using Instruction = llvm::Instruction; - using Rules = std::vector; - using Value = llvm::Value; + using Captures = OperandPrototype::Captures; + using Replacements = ReplacementRule::Replacements; + using Instruction = llvm::Instruction; + using Rules = std::vector; + using Value = llvm::Value; + using Builder = ReplacementRule::Builder; + using QubitAllocationManagerPtr = QubitAllocationManager::QubitAllocationManagerPtr; InstructionReplacementPass() + : allocation_manager_{QubitAllocationManager::createNew()} { + auto alloc_manager = allocation_manager_; auto array_name = std::make_shared(); auto index = std::make_shared(); @@ -28,6 +34,8 @@ class InstructionReplacementPass : public llvm::PassInfoMixin("__quantum__rt__array_get_element_ptr_1d"); get_element->addChild(array_name); get_element->addChild(index); + get_element->enableCapture("getelement"); + // Function name is last arg? 
get_element->addChild(std::make_shared()); @@ -35,34 +43,109 @@ class InstructionReplacementPass : public llvm::PassInfoMixin(); cast_pattern->addChild(get_element); + cast_pattern->enableCapture("cast"); + load_pattern->addChild(cast_pattern); ReplacementRule rule1; rule1.setPattern(load_pattern); - rule1.setReplacer([](Value *val, Captures &cap) { - llvm::errs() << "Found qubit load access operator " << val->getName() << " = " - << cap["arrayName"]->getName() << "[" << *cap["index"] << "]\n"; - return true; - }); + rule1.setReplacer( + [alloc_manager](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + llvm::errs() << "Failed to cast type\n"; + return false; + } + + auto cst = llvm::dyn_cast(cap["index"]); + if (cst == nullptr) + { + return false; + } + + auto llvm_size = cst->getValue(); + auto offset = alloc_manager->getOffset(cap["arrayName"]->getName().str()); + + // Creating a new index APInt that is shifted by the offset of the allocation + auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. 
Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the instruction with new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + // Deleting the getelement and cast operations + replacements.push_back({llvm::dyn_cast(cap["getelement"]), nullptr}); + replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); + + return true; + }); rules_.emplace_back(std::move(rule1)); - /* ReplacementRule rule2; - rule2.setPattern(get_element); + auto alias_count = std::make_shared("__quantum__rt__array_update_alias_count"); + rule2.setPattern(alias_count); + rule2.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); rules_.emplace_back(std::move(rule2)); - */ + + ReplacementRule rule3; + auto release_call = std::make_shared("__quantum__rt__qubit_release_array"); + rule3.setPattern(release_call); + rule3.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + rules_.emplace_back(std::move(rule3)); + + ReplacementRule rule4; + auto allocate_call = std::make_shared("__quantum__rt__qubit_allocate_array"); + + auto size = std::make_shared(); + + size->enableCapture("size"); + allocate_call->addChild(size); + allocate_call->addChild(std::make_shared()); + + rule4.setPattern(allocate_call); + + rule4.setReplacer( + [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { + auto cst = llvm::dyn_cast(cap["size"]); + if (cst == nullptr) + { + return false; + } + + auto llvm_size = cst->getValue(); + alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue()); + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + + 
rules_.emplace_back(std::move(rule4)); } /// Constructors and destructors /// @{ - InstructionReplacementPass(InstructionReplacementPass const &) = default; + InstructionReplacementPass(InstructionReplacementPass const &) = delete; InstructionReplacementPass(InstructionReplacementPass &&) = default; ~InstructionReplacementPass() = default; /// @} /// Operators /// @{ - InstructionReplacementPass &operator=(InstructionReplacementPass const &) = default; + InstructionReplacementPass &operator=(InstructionReplacementPass const &) = delete; InstructionReplacementPass &operator=(InstructionReplacementPass &&) = default; /// @} @@ -72,10 +155,13 @@ class InstructionReplacementPass : public llvm::PassInfoMixin(value); if (!children_.empty()) { - if (user == nullptr) { return false; diff --git a/src/Passes/libs/InstructionReplacement/Pattern.hpp b/src/Passes/libs/InstructionReplacement/Pattern.hpp index 2684ef286d..9577338969 100644 --- a/src/Passes/libs/InstructionReplacement/Pattern.hpp +++ b/src/Passes/libs/InstructionReplacement/Pattern.hpp @@ -84,7 +84,10 @@ class ReplacementRule using Instruction = llvm::Instruction; using Value = llvm::Value; using OperandPrototypePtr = std::shared_ptr; - using ReplaceFunction = std::function; + using Builder = llvm::IRBuilder<>; + using Replacements = std::vector>; + + using ReplaceFunction = std::function; void setPattern(OperandPrototypePtr &&pattern) { @@ -105,11 +108,11 @@ class ReplacementRule return pattern_->match(value, captures); } - bool replace(Value *value, Captures &captures) const + bool replace(Builder &builder, Value *value, Captures &captures, Replacements &replacements) const { if (replacer_) { - return replacer_(value, captures); + return replacer_(builder, value, captures, replacements); } return false; diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp new file mode 100644 index 0000000000..52220b1d5a --- /dev/null 
+++ b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include + +namespace microsoft { +namespace quantum { + +class QubitAllocationManager +{ +public: + using Index = uint64_t; + using String = std::string; + using QubitAllocationManagerPtr = std::shared_ptr; + + struct MemoryMapping + { + String name{""}; + Index index{0}; + Index size{0}; + Index start{0}; + Index end{0}; ///< Index not included in memory segment + }; + using NameToIndex = std::unordered_map; + using Mappings = std::vector; + + static QubitAllocationManagerPtr createNew() + { + QubitAllocationManagerPtr ret; + ret.reset(new QubitAllocationManager()); + + return ret; + } + + void allocate(String &&name, Index &&size) + { + MemoryMapping map; + map.name = std::move(name); + map.index = mappings_.size(); + map.size = std::move(size); + + if (name_to_index_.find(map.name) != name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + map.name + " already exists."); + } + + name_to_index_[map.name] = map.index; + if (!mappings_.empty()) + { + map.start = mappings_.back().end; + } + + map.end = map.start + size; + mappings_.emplace_back(std::move(map)); + } + + Index getOffset(String const &name) const + { + auto it = name_to_index_.find(name); + if (it == name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + name + " not found."); + } + auto index = it->second; + + return mappings_[index].start; + } + + void release(String const & /*name*/) + {} + +private: + QubitAllocationManager() = default; + + NameToIndex name_to_index_; + Mappings mappings_; +}; + +} // namespace quantum +} // namespace microsoft From 6a9fc4b8372ff33f08842b7b0bcb9ad567af965b Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 4 Aug 2021 14:49:22 +0200 Subject: [PATCH 053/106] Tidying code --- .../InstructionReplacement.cpp | 184 ++++++++++++++---- .../InstructionReplacement.hpp | 124 +----------- .../libs/InstructionReplacement/Pattern.cpp | 3 + .../libs/InstructionReplacement/Pattern.hpp | 66 ++----- .../QubitAllocationManager.cpp | 57 ++++++ .../QubitAllocationManager.hpp | 57 +----- .../libs/InstructionReplacement/Rule.cpp | 40 ++++ .../libs/InstructionReplacement/Rule.hpp | 42 ++++ 8 files changed, 318 insertions(+), 255 deletions(-) create mode 100644 src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp create mode 100644 src/Passes/libs/InstructionReplacement/Rule.cpp create mode 100644 src/Passes/libs/InstructionReplacement/Rule.hpp diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp index 137f873900..b149c42ae6 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp @@ -10,69 +10,182 @@ namespace microsoft { namespace quantum { + +InstructionReplacementPass::InstructionReplacementPass() +{ + // Shared pointer to be captured in the lambdas of the patterns + // Note that you cannot capture this as the reference is destroyed upon + // copy. 
Since PassInfoMixin requires copy, such a construct would break + auto alloc_manager = QubitAllocationManager::createNew(); + + // Pattern 1 - Get array index + auto array_name = std::make_shared(); + auto index = std::make_shared(); + array_name->enableCapture("arrayName"); + index->enableCapture("index"); + + auto get_element = std::make_shared("__quantum__rt__array_get_element_ptr_1d"); + get_element->addChild(array_name); + get_element->addChild(index); + get_element->enableCapture("getelement"); + + // Function name + get_element->addChild(std::make_shared()); + + auto load_pattern = std::make_shared(); + auto cast_pattern = std::make_shared(); + + cast_pattern->addChild(get_element); + cast_pattern->enableCapture("cast"); + + load_pattern->addChild(cast_pattern); + + // Rule 1 + ReplacementRule rule1; + rule1.setPattern(load_pattern); + + // Replacement details + rule1.setReplacer( + [alloc_manager](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + // Get the index and testing that it is a constant int + auto cst = llvm::dyn_cast(cap["index"]); + if (cst == nullptr) + { + // ... if not, we cannot perform the mapping. + return false; + } + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. + auto llvm_size = cst->getValue(); + auto offset = alloc_manager->getOffset(cap["arrayName"]->getName().str()); + + // Creating a new index APInt that is shifted by the offset of the allocation + auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. 
Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the instruction with new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + // Deleting the getelement and cast operations + replacements.push_back({llvm::dyn_cast(cap["getelement"]), nullptr}); + replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); + + return true; + }); + rules_.emplace_back(std::move(rule1)); + + // Rule 2 - delete __quantum__rt__array_update_alias_count + ReplacementRule rule2; + auto alias_count = std::make_shared("__quantum__rt__array_update_alias_count"); + rule2.setPattern(alias_count); + rule2.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + rules_.emplace_back(std::move(rule2)); + + // Rule 3 - delete __quantum__rt__qubit_release_array + ReplacementRule rule3; + auto release_call = std::make_shared("__quantum__rt__qubit_release_array"); + rule3.setPattern(release_call); + rule3.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + rules_.emplace_back(std::move(rule3)); + + // Rule 4 - perform static allocation and delete __quantum__rt__qubit_allocate_array + ReplacementRule rule4; + auto allocate_call = std::make_shared("__quantum__rt__qubit_allocate_array"); + + auto size = std::make_shared(); + + size->enableCapture("size"); + allocate_call->addChild(size); + allocate_call->addChild(std::make_shared()); + + rule4.setPattern(allocate_call); + + rule4.setReplacer( + [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { + auto cst = llvm::dyn_cast(cap["size"]); + if (cst == nullptr) + { + return false; + } + + auto llvm_size = cst->getValue(); + 
alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue()); + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + + rules_.emplace_back(std::move(rule4)); +} + llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function, llvm::FunctionAnalysisManager & /*fam*/) { replacements_.clear(); - // Pass body - bool changed{false}; + // For every instruction in every block, we attempt a match + // and replace. for (auto &basic_block : function) { for (auto &instr : basic_block) { - /* - llvm::errs() << instr << "\n"; - for (uint32_t i = 0; i < instr.getNumOperands(); ++i) - { - auto op = llvm::dyn_cast(instr.getOperand(i)); - if (op == nullptr) - { - continue; - } - llvm::errs() << " - " << *op << "\n"; - for (uint32_t j = 0; j < op->getNumOperands(); ++j) - { - auto x = op->getOperand(j); - if (x == nullptr) - { - continue; - } - llvm::errs() << " * " << *x << "\n"; - } - } - - llvm::errs() << "\n\n"; - */ - if (matchAndReplace(&instr)) - { - changed = true; - } + matchAndReplace(&instr); } } - llvm::errs() << "REPLACEMENTS!" << this << "\n"; - + // Applying all replacements for (auto it = replacements_.rbegin(); it != replacements_.rend(); ++it) { + // Cheking if have a replacement for the instruction if (it->second != nullptr) { - llvm::errs() << "Replacing " << *it->first; + // ... if so, we just replace it, llvm::ReplaceInstWithInst(it->first, it->second); - llvm::errs() << " with " << *it->second << "\n"; } else { + // ... 
otherwise we delete the instruction auto instruction = it->first; + + // Removing all uses if (!instruction->use_empty()) { instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); } + + // And finally we delete the instruction instruction->eraseFromParent(); } } - // llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; + // If we did not change the IR, we report that we preserved all + if (replacements_.empty()) + { + return llvm::PreservedAnalyses::all(); + } + + // ... and otherwise, we report that we preserved none. return llvm::PreservedAnalyses::none(); } @@ -86,8 +199,11 @@ bool InstructionReplacementPass::isRequired() return true; } -bool InstructionReplacementPass::matchAndReplace(Instruction *value) +bool InstructionReplacementPass::matchAndReplace(Instruction *value) { Captures captures; for (auto const &rule : rules_) { + // Checking if the rule is matched and keep track of captured nodes if (rule.match(value, captures)) { + + // If it is matched, we attempt to replace it llvm::IRBuilder<> builder{value}; if (rule.replace(builder, value, captures, replacements_)) { diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp index f9d0b3814c..4186ed200b 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp @@ -1,8 +1,10 @@ #pragma once // Copyright (c) Microsoft Corporation. // Licensed under the MIT License.
+ #include "InstructionReplacement/Pattern.hpp" #include "InstructionReplacement/QubitAllocationManager.hpp" +#include "InstructionReplacement/Rule.hpp" #include "Llvm.hpp" #include @@ -21,123 +23,9 @@ class InstructionReplacementPass : public llvm::PassInfoMixin(); - auto index = std::make_shared(); - array_name->enableCapture("arrayName"); - index->enableCapture("index"); - - auto get_element = std::make_shared("__quantum__rt__array_get_element_ptr_1d"); - get_element->addChild(array_name); - get_element->addChild(index); - get_element->enableCapture("getelement"); - - // Function name is last arg? - get_element->addChild(std::make_shared()); - - auto load_pattern = std::make_shared(); - auto cast_pattern = std::make_shared(); - - cast_pattern->addChild(get_element); - cast_pattern->enableCapture("cast"); - - load_pattern->addChild(cast_pattern); - - ReplacementRule rule1; - rule1.setPattern(load_pattern); - rule1.setReplacer( - [alloc_manager](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) - { - llvm::errs() << "Failed to cast type\n"; - return false; - } - - auto cst = llvm::dyn_cast(cap["index"]); - if (cst == nullptr) - { - return false; - } - - auto llvm_size = cst->getValue(); - auto offset = alloc_manager->getOffset(cap["arrayName"]->getName().str()); - - // Creating a new index APInt that is shifted by the offset of the allocation - auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); - - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. 
Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); - - // Replacing the instruction with new instruction - replacements.push_back({llvm::dyn_cast(val), instr}); - - // Deleting the getelement and cast operations - replacements.push_back({llvm::dyn_cast(cap["getelement"]), nullptr}); - replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); - - return true; - }); - rules_.emplace_back(std::move(rule1)); - - ReplacementRule rule2; - auto alias_count = std::make_shared("__quantum__rt__array_update_alias_count"); - rule2.setPattern(alias_count); - rule2.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - rules_.emplace_back(std::move(rule2)); - - ReplacementRule rule3; - auto release_call = std::make_shared("__quantum__rt__qubit_release_array"); - rule3.setPattern(release_call); - rule3.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - rules_.emplace_back(std::move(rule3)); - - ReplacementRule rule4; - auto allocate_call = std::make_shared("__quantum__rt__qubit_allocate_array"); - - auto size = std::make_shared(); - - size->enableCapture("size"); - allocate_call->addChild(size); - allocate_call->addChild(std::make_shared()); - - rule4.setPattern(allocate_call); - - rule4.setReplacer( - [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { - auto cst = llvm::dyn_cast(cap["size"]); - if (cst == nullptr) - { - return false; - } - - auto llvm_size = cst->getValue(); - alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue()); - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - - rules_.emplace_back(std::move(rule4)); - } - /// Constructors and 
destructors /// @{ + InstructionReplacementPass(); InstructionReplacementPass(InstructionReplacementPass const &) = delete; InstructionReplacementPass(InstructionReplacementPass &&) = default; ~InstructionReplacementPass() = default; @@ -158,10 +46,8 @@ class InstructionReplacementPass : public llvm::PassInfoMixin @@ -10,13 +13,15 @@ namespace quantum { class OperandPrototype { public: - using Instruction = llvm::Instruction; - using String = std::string; - using Value = llvm::Value; - using Child = std::shared_ptr; - using Children = std::vector; - using Captures = std::unordered_map; + using Instruction = llvm::Instruction; + using String = std::string; + using Value = llvm::Value; + using Child = std::shared_ptr; + using Children = std::vector; + using Captures = std::unordered_map; + OperandPrototype() = default; + virtual ~OperandPrototype(); virtual bool match(Value *value, Captures &captures) const = 0; @@ -77,54 +82,5 @@ class InstructionPattern : public OperandPrototype using LoadPattern = InstructionPattern; using BitCastPattern = InstructionPattern; -class ReplacementRule -{ -public: - using Captures = OperandPrototype::Captures; - using Instruction = llvm::Instruction; - using Value = llvm::Value; - using OperandPrototypePtr = std::shared_ptr; - using Builder = llvm::IRBuilder<>; - using Replacements = std::vector>; - - using ReplaceFunction = std::function; - - void setPattern(OperandPrototypePtr &&pattern) - { - pattern_ = std::move(pattern); - } - - void setReplacer(ReplaceFunction const &replacer) - { - replacer_ = replacer; - } - - bool match(Value *value, Captures &captures) const - { - if (pattern_ == nullptr) - { - return false; - } - return pattern_->match(value, captures); - } - - bool replace(Builder &builder, Value *value, Captures &captures, Replacements &replacements) const - { - if (replacer_) - { - return replacer_(builder, value, captures, replacements); - } - - return false; - } - -private: - OperandPrototypePtr pattern_{nullptr}; - 
ReplaceFunction replacer_{nullptr}; -}; - -// Propposed syntax for establishing rules -// "name"_rule = ("add"_op(0_o, "value"_any ), -// "sub"_op(2_i32, "name"_reg )) => "noop"_op("value"_any, "name"_reg); } // namespace quantum } // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp new file mode 100644 index 0000000000..35da941b95 --- /dev/null +++ b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include +#include +#include + +namespace microsoft { +namespace quantum { + +static QubitAllocationManager::QubitAllocationManagerPtr QubitAllocationManager::createNew() +{ + QubitAllocationManagerPtr ret; + ret.reset(new QubitAllocationManager()); + + return ret; +} + +void QubitAllocationManager::allocate(String &&name, Index &&size) +{ + MemoryMapping map; + map.name = std::move(name); + map.index = mappings_.size(); + map.size = std::move(size); + + if (name_to_index_.find(map.name) != name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + map.name + " already exists."); + } + + name_to_index_[map.name] = map.index; + if (!mappings_.empty()) + { + map.start = mappings_.back().end; + } + + map.end = map.start + size; + mappings_.emplace_back(std::move(map)); +} + +Index QubitAllocationManager::getOffset(String const &name) const +{ + auto it = name_to_index_.find(name); + if (it == name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + name + " not found."); + } + auto index = it->second; + + return mappings_[index].start; +} + +void QubitAllocationManager::release(String const & /*name*/) +{} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp 
b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp index 52220b1d5a..ea89c6e9ab 100644 --- a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp +++ b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp @@ -1,4 +1,6 @@ #pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. #include #include @@ -17,58 +19,19 @@ class QubitAllocationManager struct MemoryMapping { String name{""}; - Index index{0}; - Index size{0}; - Index start{0}; - Index end{0}; ///< Index not included in memory segment + Index index{0}; ///< Index of the allocation + Index size{0}; ///< Size of memory segment + Index start{0}; ///< Start index of memory segment + Index end{0}; ///< Index not included in memory segment }; using NameToIndex = std::unordered_map; using Mappings = std::vector; - static QubitAllocationManagerPtr createNew() - { - QubitAllocationManagerPtr ret; - ret.reset(new QubitAllocationManager()); - - return ret; - } - - void allocate(String &&name, Index &&size) - { - MemoryMapping map; - map.name = std::move(name); - map.index = mappings_.size(); - map.size = std::move(size); - - if (name_to_index_.find(map.name) != name_to_index_.end()) - { - throw std::runtime_error("Memory segment with name " + map.name + " already exists."); - } - - name_to_index_[map.name] = map.index; - if (!mappings_.empty()) - { - map.start = mappings_.back().end; - } - - map.end = map.start + size; - mappings_.emplace_back(std::move(map)); - } - - Index getOffset(String const &name) const - { - auto it = name_to_index_.find(name); - if (it == name_to_index_.end()) - { - throw std::runtime_error("Memory segment with name " + name + " not found."); - } - auto index = it->second; - - return mappings_[index].start; - } + static QubitAllocationManagerPtr createNew(); - void release(String const & /*name*/) - {} + void allocate(String &&name, Index &&size); + Index getOffset(String const &name) const; + void release(String const 
&name); private: QubitAllocationManager() = default; diff --git a/src/Passes/libs/InstructionReplacement/Rule.cpp b/src/Passes/libs/InstructionReplacement/Rule.cpp new file mode 100644 index 0000000000..34271432f9 --- /dev/null +++ b/src/Passes/libs/InstructionReplacement/Rule.cpp @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "InstructionReplacement/Pattern.hpp" + +namespace microsoft { +namespace quantum { + +void ReplacementRule::setPattern(OperandPrototypePtr &&pattern) +{ + pattern_ = std::move(pattern); +} + +void ReplacementRule::setReplacer(ReplaceFunction const &replacer) +{ + replacer_ = replacer; +} + +bool ReplacementRule::match(Value *value, Captures &captures) const +{ + if (pattern_ == nullptr) + { + return false; + } + return pattern_->match(value, captures); +} + +bool ReplacementRule::replace(Builder &builder, Value *value, Captures &captures, + Replacements &replacements) const +{ + if (replacer_) + { + return replacer_(builder, value, captures, replacements); + } + + return false; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/Rule.hpp b/src/Passes/libs/InstructionReplacement/Rule.hpp new file mode 100644 index 0000000000..d9bf93d350 --- /dev/null +++ b/src/Passes/libs/InstructionReplacement/Rule.hpp @@ -0,0 +1,42 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "InstructionReplacement/Pattern.hpp" +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +class ReplacementRule +{ +public: + using Captures = OperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using OperandPrototypePtr = std::shared_ptr; + using Builder = llvm::IRBuilder<>; + using Replacements = std::vector>; + using ReplaceFunction = std::function; + + /// Rule configuration + /// @{ + void setPattern(OperandPrototypePtr &&pattern); + void setReplacer(ReplaceFunction const &replacer); + /// @} + + /// Operation + /// @{ + bool match(Value *value, Captures &captures) const; + bool replace(Builder &builder, Value *value, Captures &captures, + Replacements &replacements) const; + /// @} +private: + OperandPrototypePtr pattern_{nullptr}; + ReplaceFunction replacer_{nullptr}; +}; + +} // namespace quantum +} // namespace microsoft From 48118db1f30de35b89657785fe21c373cc527301 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 4 Aug 2021 14:54:21 +0200 Subject: [PATCH 054/106] Finalising refactor --- .../libs/InstructionReplacement/QubitAllocationManager.cpp | 6 ++++-- .../libs/InstructionReplacement/QubitAllocationManager.hpp | 1 + src/Passes/libs/InstructionReplacement/Rule.cpp | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp index 35da941b95..58bda02744 100644 --- a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp +++ b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
+#include "InstructionReplacement/QubitAllocationManager.hpp" + #include #include #include @@ -8,7 +10,7 @@ namespace microsoft { namespace quantum { -static QubitAllocationManager::QubitAllocationManagerPtr QubitAllocationManager::createNew() +QubitAllocationManager::QubitAllocationManagerPtr QubitAllocationManager::createNew() { QubitAllocationManagerPtr ret; ret.reset(new QubitAllocationManager()); @@ -38,7 +40,7 @@ void QubitAllocationManager::allocate(String &&name, Index &&size) mappings_.emplace_back(std::move(map)); } -Index QubitAllocationManager::getOffset(String const &name) const +QubitAllocationManager::Index QubitAllocationManager::getOffset(String const &name) const { auto it = name_to_index_.find(name); if (it == name_to_index_.end()) diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp index ea89c6e9ab..0f45a71cf0 100644 --- a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp +++ b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp @@ -3,6 +3,7 @@ // Licensed under the MIT License. #include +#include #include #include diff --git a/src/Passes/libs/InstructionReplacement/Rule.cpp b/src/Passes/libs/InstructionReplacement/Rule.cpp index 34271432f9..44ca962ee6 100644 --- a/src/Passes/libs/InstructionReplacement/Rule.cpp +++ b/src/Passes/libs/InstructionReplacement/Rule.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "InstructionReplacement/Pattern.hpp" +#include "InstructionReplacement/Rule.hpp" namespace microsoft { namespace quantum { @@ -22,6 +22,7 @@ bool ReplacementRule::match(Value *value, Captures &captures) const { return false; } + return pattern_->match(value, captures); } From 9b3bcbd2fb99c71cf10e39923d294e6a8cc02025 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 9 Aug 2021 07:23:44 +0200 Subject: [PATCH 055/106] Fixing bug relating to pattern creation --- .../ConstSizeArray/ConstSizeArray.qs | 29 +- .../ConstSizeArray/Makefile | 2 +- .../ConstSizeArray/qir/ConstSizeArray.ll | 2072 +++++++++++++++++ .../examples/QubitAllocationAnalysis/Makefile | 4 +- .../analysis-example.ll | 52 +- .../examples/QubitAllocationAnalysis/test.ll | 41 + 6 files changed, 2140 insertions(+), 60 deletions(-) create mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll create mode 100644 src/Passes/examples/QubitAllocationAnalysis/test.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index 947ffc7f2e..293a4ae6d5 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,22 +1,13 @@ -namespace Example { - open Microsoft.Quantum.Measurement; +namespace Microsoft.Quantum.Tutorial +{ open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - - @EntryPoint() - operation Main() : Int - { - use qubits2 = Qubit[3]; - use qubits1 = Qubit[3]; - - X(qubits1[0]); - X(qubits1[1]); - X(qubits1[2]); - X(qubits2[0]); - X(qubits2[1]); - X(qubits2[2]); - - return 0; + @EntryPoint() + operation TeleportAndReset() : Unit { + use qs = Qubit[3]; + let x = [qs[1], qs[0], qs[2]]; + X(x[0]); + X(x[1]); + X(x[2]); } -} +} \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile index 59399d367e..8b2b70af22 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile @@ -9,5 +9,5 @@ comparison: clean: rm -rf bin rm -rf obj - rm -rf qir +# rm -rf qir \ No 
newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll new file mode 100644 index 0000000000..4cfd943124 --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll @@ -0,0 +1,2072 @@ + +%Range = type { i64, i64, i64 } +%Tuple = type opaque +%Array = type opaque +%Qubit = type opaque +%String = type opaque +%Callable = type opaque +%Result = type opaque + +@PauliI = internal constant i2 0 +@PauliX = internal constant i2 1 +@PauliY = internal constant i2 -1 +@PauliZ = internal constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } +@PartialApplication__1 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__1 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__3 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] 
+@MemoryManagement__2 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] +@PartialApplication__4 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] +@PartialApplication__5 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] +@PartialApplication__6 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] +@PartialApplication__7 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__8 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__9 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] +@PartialApplication__10 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@PartialApplication__11 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] +@PartialApplication__12 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] + +define internal void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 1) + %0 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %3 = bitcast i8* %2 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %3, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit__1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %5 = bitcast i8* %4 to %Qubit** + %qubit__2 = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit__2) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal { %String* }* @Microsoft__Quantum__Diagnostics__EnableTestingViaName__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %__Item1__, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) + ret { %String* }* %1 +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +define internal { %String* }* @Microsoft__Quantum__Diagnostics__Test__body(%String* %ExecutionTarget) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** 
null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %ExecutionTarget, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %ExecutionTarget, i32 1) + ret { %String* }* %1 +} + +define internal %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +entry: + ret %Tuple* null +} + +define internal { %String* }* @Microsoft__Quantum__Core__Deprecated__body(%String* %NewName) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %NewName, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %NewName, i32 1) + ret { %String* }* %1 +} + +define internal %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +entry: + ret %Tuple* null +} + +define internal %Tuple* @Microsoft__Quantum__Core__Inline__body() { +entry: + ret %Tuple* null +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, 
%Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare void @__quantum__qis__applyconditionallyintrinsic__body(%Array*, %Array*, %Callable*, %Callable*) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call 
void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + 
%measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void 
@__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %10) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, 
%Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 
+ call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** 
%16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 
1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast 
%Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + 
%7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { 
%Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load 
%Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctladj(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %14) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) + 
call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call 
%Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* 
%13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* 
}* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call 
%Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* 
%9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw 
(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* 
%measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +declare void @__quantum__qis__applyifelseintrinsic__body(%Result*, %Callable*, %Callable*) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + 
call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* 
%onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 
1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 
= bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { 
%Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast 
%Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { 
%Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %onResultOneOp__1 = 
call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds 
{ %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { 
%Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** 
getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, 
%Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load 
%Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctladj(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__callable_make_controlled(%Callable* 
%8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, 
%Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 
1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* 
%8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* 
}, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** 
%16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal { %String*, %String* }* 
@Microsoft__Quantum__Targeting__RequiresCapability__body(%String* %Level, %String* %Reason) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %1 = bitcast %Tuple* %0 to { %String*, %String* }* + %2 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 1 + store %String* %Level, %String** %2, align 8 + store %String* %Reason, %String** %3, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %Level, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %Reason, i32 1) + ret { %String*, %String* }* %1 +} + +define internal { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %__Item1__, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) + ret { %String* }* %1 +} + +define void @Microsoft__Quantum__Tutorial__TeleportAndReset__Interop() #0 { +entry: + call void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + ret void +} + +define void @Microsoft__Quantum__Tutorial__TeleportAndReset() #1 { +entry: + call void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + ret void +} + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index 2c3ce56f1a..b6312fa92a 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -7,7 +7,9 @@ run: 
build-qaa analysis-example.ll opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll run-replace: build-ir analysis-example.ll - opt -load-pass-plugin ../../Debug/libs/libInstructionReplacement.dylib --passes="instruction-replacement" -S analysis-example.ll +# opt -loop-unroll -unroll-count=3 -unroll-allow-partial + opt -load-pass-plugin ../../Debug/libs/libInstructionReplacement.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-rotate,loop-unroll,instruction-replacement" -S analysis-example.ll > test.ll + opt --passes="inline" -S test.ll | opt -O1 -S build-prepare: diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index be993311fa..f4ea11235b 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -3,42 +3,25 @@ source_filename = "qir/ConstSizeArray.ll" %Array = type opaque %Qubit = type opaque -%String = type opaque -define internal fastcc void @Example__Main__body() unnamed_addr { +define internal fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() unnamed_addr { entry: - %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) - %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits1, i64 0) + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %1 = bitcast i8* %0 to %Qubit** %qubit = load %Qubit*, %Qubit** %1, align 8 call void @__quantum__qis__x__body(%Qubit* %qubit) - %2 = call 
i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits1, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %3 = bitcast i8* %2 to %Qubit** %qubit__1 = load %Qubit*, %Qubit** %3, align 8 call void @__quantum__qis__x__body(%Qubit* %qubit__1) - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits1, i64 2) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %5 = bitcast i8* %4 to %Qubit** %qubit__2 = load %Qubit*, %Qubit** %5, align 8 call void @__quantum__qis__x__body(%Qubit* %qubit__2) - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits2, i64 0) - %7 = bitcast i8* %6 to %Qubit** - %qubit__3 = load %Qubit*, %Qubit** %7, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit__3) - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits2, i64 1) - %9 = bitcast i8* %8 to %Qubit** - %qubit__4 = load %Qubit*, %Qubit** %9, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit__4) - %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits2, i64 2) - %11 = bitcast i8* %10 to %Qubit** - %qubit__5 = load %Qubit*, %Qubit** %11, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit__5) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) ret void } @@ -52,26 +35,17 @@ declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { 
+define void @Microsoft__Quantum__Tutorial__TeleportAndReset__Interop() local_unnamed_addr #0 { entry: - call fastcc void @Example__Main__body() - ret i64 0 + call fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + ret void } -define void @Example__Main() local_unnamed_addr #1 { +define void @Microsoft__Quantum__Tutorial__TeleportAndReset() local_unnamed_addr #1 { entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + call fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() ret void } -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - attributes #0 = { "InteropFriendly" } attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.ll b/src/Passes/examples/QubitAllocationAnalysis/test.ll new file mode 100644 index 0000000000..94e3f72897 --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/test.ll @@ -0,0 +1,41 @@ +; ModuleID = 'analysis-example.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Qubit = type opaque +%Array = type opaque + +define internal fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() unnamed_addr { +entry: + %qubit = inttoptr i64 0 to %Qubit* + call void @__quantum__qis__x__body(%Qubit* %qubit) + %qubit__1 = inttoptr i64 1 to %Qubit* + call void @__quantum__qis__x__body(%Qubit* %qubit__1) + %qubit__2 = inttoptr i64 2 to %Qubit* + call void @__quantum__qis__x__body(%Qubit* %qubit__2) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr + +declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr + +define void @Microsoft__Quantum__Tutorial__TeleportAndReset__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + ret void +} + +define void @Microsoft__Quantum__Tutorial__TeleportAndReset() local_unnamed_addr #1 { +entry: + call fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + ret void +} + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } From 89ac05322d0ee2cf3c74f99d703353e87359ce51 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 9 Aug 2021 07:32:00 +0200 Subject: [PATCH 056/106] Intermediate reorganisation --- .../Rules/OperandPrototype.hpp} | 21 +++- src/Passes/include/Rules/ReplacementRule.hpp | 113 ++++++++++++++++++ .../InstructionReplacement.cpp | 106 +++++++++++----- .../QubitAllocationManager.cpp | 51 ++++++-- .../QubitAllocationManager.hpp | 9 +- .../libs/InstructionReplacement/Rule.hpp | 42 ------- .../Pattern.cpp => src/OperandPrototype.cpp} | 21 ++++ .../Rule.cpp => src/ReplacementRule.cpp} | 0 8 files changed, 276 insertions(+), 87 deletions(-) rename src/Passes/{libs/InstructionReplacement/Pattern.hpp => include/Rules/OperandPrototype.hpp} (73%) create mode 100644 src/Passes/include/Rules/ReplacementRule.hpp delete mode 100644 src/Passes/libs/InstructionReplacement/Rule.hpp rename src/Passes/{libs/InstructionReplacement/Pattern.cpp => src/OperandPrototype.cpp} (86%) rename src/Passes/{libs/InstructionReplacement/Rule.cpp => src/ReplacementRule.cpp} (100%) diff --git a/src/Passes/libs/InstructionReplacement/Pattern.hpp b/src/Passes/include/Rules/OperandPrototype.hpp similarity index 73% rename from src/Passes/libs/InstructionReplacement/Pattern.hpp rename to src/Passes/include/Rules/OperandPrototype.hpp index c70cb75666..43e1c7b296 100644 --- 
a/src/Passes/libs/InstructionReplacement/Pattern.hpp +++ b/src/Passes/include/Rules/OperandPrototype.hpp @@ -23,7 +23,8 @@ class OperandPrototype OperandPrototype() = default; virtual ~OperandPrototype(); - virtual bool match(Value *value, Captures &captures) const = 0; + virtual bool match(Value *value, Captures &captures) const = 0; + virtual Child copy() const = 0; void addChild(Child const &child) { @@ -39,11 +40,17 @@ class OperandPrototype bool fail(Value *value, Captures &captures) const; bool success(Value *value, Captures &captures) const; -private: bool matchChildren(Value *value, Captures &captures) const; void capture(Value *value, Captures &captures) const; void uncapture(Value *value, Captures &captures) const; + void copyPropertiesFrom(OperandPrototype const &other) + { + capture_name_ = other.capture_name_; + children_ = other.children_; + } + +private: std::string capture_name_{""}; Children children_{}; }; @@ -53,7 +60,8 @@ class AnyPattern : public OperandPrototype public: AnyPattern(); ~AnyPattern() override; - bool match(Value *instr, Captures &captures) const override; + bool match(Value *instr, Captures &captures) const override; + Child copy() const override; }; class CallPattern : public OperandPrototype @@ -64,7 +72,8 @@ class CallPattern : public OperandPrototype ~CallPattern() override; - bool match(Value *instr, Captures &captures) const override; + bool match(Value *instr, Captures &captures) const override; + Child copy() const override; private: String name_{}; @@ -76,9 +85,11 @@ class InstructionPattern : public OperandPrototype public: using OperandPrototype::OperandPrototype; ~InstructionPattern() override; - bool match(Value *instr, Captures &captures) const override; + bool match(Value *instr, Captures &captures) const override; + Child copy() const override; }; +using StorePattern = InstructionPattern; using LoadPattern = InstructionPattern; using BitCastPattern = InstructionPattern; diff --git 
a/src/Passes/include/Rules/ReplacementRule.hpp b/src/Passes/include/Rules/ReplacementRule.hpp new file mode 100644 index 0000000000..0c08c1387a --- /dev/null +++ b/src/Passes/include/Rules/ReplacementRule.hpp @@ -0,0 +1,113 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "InstructionReplacement/Pattern.hpp" +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +class ReplacementRule +{ +public: + using Captures = OperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using OperandPrototypePtr = std::shared_ptr; + using Builder = llvm::IRBuilder<>; + using Replacements = std::vector>; + using ReplaceFunction = std::function; + + /// Rule configuration + /// @{ + void setPattern(OperandPrototypePtr &&pattern); + void setReplacer(ReplaceFunction const &replacer); + /// @} + + /// Operation + /// @{ + bool match(Value *value, Captures &captures) const; + bool replace(Builder &builder, Value *value, Captures &captures, + Replacements &replacements) const; + /// @} +private: + OperandPrototypePtr pattern_{nullptr}; + ReplaceFunction replacer_{nullptr}; +}; + +namespace patterns { +using OperandPrototypePtr = std::shared_ptr; +template +inline OperandPrototypePtr Call(std::string const &name, Args... 
args) +{ + OperandPrototypePtr ret = std::make_shared(name); + std::vector arguments{args...}; + + // Adding arguments to matching + for (auto &a : arguments) + { + ret->addChild(a); + } + + // Function name is kept in the last operand + ret->addChild(std::make_shared()); + + return ret; +} + +inline OperandPrototypePtr BitCast(OperandPrototypePtr arg) +{ + auto cast_pattern = std::make_shared(); + + cast_pattern->addChild(arg); + return static_cast(cast_pattern); +} + +inline OperandPrototypePtr Load(OperandPrototypePtr arg) +{ + auto ret = std::make_shared(); + + ret->addChild(arg); + return static_cast(ret); +} + +inline OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value) +{ + auto ret = std::make_shared(); + + ret->addChild(target); + ret->addChild(value); + return static_cast(ret); +} +static std::shared_ptr _ = std::make_shared(); + +class Capture +{ +public: + Capture(std::string const &name) + : name_{name} + {} + + OperandPrototypePtr operator=(OperandPrototypePtr const &other) + { + auto ret = other->copy(); + ret->enableCapture(name_); + return ret; + } + +private: + std::string name_{}; +}; + +inline Capture operator""_cap(char const *name, std::size_t) +{ + return Capture(name); +} + +} // namespace patterns + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp index b149c42ae6..0b8026a19e 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp @@ -13,36 +13,50 @@ namespace quantum { InstructionReplacementPass::InstructionReplacementPass() { + using namespace microsoft::quantum::patterns; + // Shared pointer to be captured in the lambdas of the patterns // Note that you cannot capture this as the reference is destroyed upon // copy. 
Since PassInfoMixin requires copy, such a construct would break auto alloc_manager = QubitAllocationManager::createNew(); - // Pattern 1 - Get array index - auto array_name = std::make_shared(); - auto index = std::make_shared(); - array_name->enableCapture("arrayName"); - index->enableCapture("index"); + // Pattern 0 - Find type + ReplacementRule rule0; + + auto get_element = + Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); + rule0.setPattern("cast"_cap = BitCast("getElement"_cap = get_element)); + rule0.setReplacer([alloc_manager](Builder &, Value *, Captures &cap, Replacements &) { + llvm::errs() << "Identified an access attempt" + << "\n"; + + auto type = cap["cast"]->getType(); - auto get_element = std::make_shared("__quantum__rt__array_get_element_ptr_1d"); - get_element->addChild(array_name); - get_element->addChild(index); - get_element->enableCapture("getelement"); + // This rule only deals with access to arrays of opaque types + auto ptr_type = llvm::dyn_cast(type); + if (ptr_type == nullptr) + { + return false; + } - // Function name - get_element->addChild(std::make_shared()); + auto array = cap["arrayName"]; - auto load_pattern = std::make_shared(); - auto cast_pattern = std::make_shared(); + llvm::errs() << *array->getType() << " of " << *type << " " << type->isPointerTy() << " " + << *type->getPointerElementType() << " " << type->isArrayTy() << "\n"; + return false; + }); + rules_.emplace_back(std::move(rule0)); - cast_pattern->addChild(get_element); - cast_pattern->enableCapture("cast"); + // Pattern 1 - Get array index - load_pattern->addChild(cast_pattern); + // auto get_element = + // Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); + auto cast_pattern = BitCast("getElement"_cap = get_element); + auto load_pattern = Load("cast"_cap = cast_pattern); // Rule 1 ReplacementRule rule1; - rule1.setPattern(load_pattern); + rule1.setPattern(std::move(load_pattern)); // 
Replacement details rule1.setReplacer( @@ -83,7 +97,7 @@ InstructionReplacementPass::InstructionReplacementPass() replacements.push_back({llvm::dyn_cast(val), instr}); // Deleting the getelement and cast operations - replacements.push_back({llvm::dyn_cast(cap["getelement"]), nullptr}); + replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); return true; @@ -112,17 +126,34 @@ InstructionReplacementPass::InstructionReplacementPass() // Rule 4 - perform static allocation and delete __quantum__rt__qubit_allocate_array ReplacementRule rule4; - auto allocate_call = std::make_shared("__quantum__rt__qubit_allocate_array"); + auto allocate_call = Call("__quantum__rt__qubit_allocate_array", "size"_cap = _); + rule4.setPattern(std::move(allocate_call)); + + rule4.setReplacer( + [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { + auto cst = llvm::dyn_cast(cap["size"]); + if (cst == nullptr) + { + return false; + } + + auto llvm_size = cst->getValue(); + auto name = val->getName().str(); + alloc_manager->allocate(name, llvm_size.getZExtValue()); - auto size = std::make_shared(); + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); - size->enableCapture("size"); - allocate_call->addChild(size); - allocate_call->addChild(std::make_shared()); + rules_.emplace_back(std::move(rule4)); - rule4.setPattern(allocate_call); + // Rule 5 - standard array allocation + ReplacementRule rule5; + auto allocate_array_call = + Call("__quantum__rt__array_create_1d", "elementSize"_cap = _, "size"_cap = _); + rule5.setPattern(std::move(allocate_array_call)); - rule4.setReplacer( + rule5.setReplacer( [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { auto cst = llvm::dyn_cast(cap["size"]); if (cst == nullptr) @@ -131,12 +162,33 @@ InstructionReplacementPass::InstructionReplacementPass() } auto llvm_size = cst->getValue(); - 
alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue()); + alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue(), true); replacements.push_back({llvm::dyn_cast(val), nullptr}); return true; }); - rules_.emplace_back(std::move(rule4)); + rules_.emplace_back(std::move(rule5)); + + // Rule 6 - track stored values + + auto get_target_element = Call("__quantum__rt__array_get_element_ptr_1d", + "targetArrayName"_cap = _, "targetIndex"_cap = _); + auto get_value_element = Call("__quantum__rt__array_get_element_ptr_1d", "valueArrayName"_cap = _, + "targetValue"_cap = _); + auto target = BitCast("target"_cap = get_target_element); + auto value = BitCast("value"_cap = get_element); + + auto store_pattern = Store(target, value); + + ReplacementRule rule6; + rule6.setPattern(std::move(store_pattern)); + + rule6.setReplacer([alloc_manager](Builder &, Value *, Captures &, Replacements &) { + llvm::errs() << "Found store pattern" + << "\n"; + return false; + }); + rules_.emplace_back(std::move(rule6)); } llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function, diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp index 58bda02744..e22b48176f 100644 --- a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp +++ b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp @@ -18,26 +18,53 @@ QubitAllocationManager::QubitAllocationManagerPtr QubitAllocationManager::create return ret; } -void QubitAllocationManager::allocate(String &&name, Index &&size) +void QubitAllocationManager::allocate(String const &name, Index const &size, bool value_only) { - MemoryMapping map; - map.name = std::move(name); - map.index = mappings_.size(); - map.size = std::move(size); + // Creating an array to store values + // llvm::errs() << "Allocating " << name << " " << size << "\n"; + if (arrays_.find(name) != arrays_.end()) + { + 
throw std::runtime_error("Array with name " + name + " already exists."); + } - if (name_to_index_.find(map.name) != name_to_index_.end()) + arrays_[name].resize(size); + for (auto &v : arrays_[name]) { - throw std::runtime_error("Memory segment with name " + map.name + " already exists."); + v = nullptr; } - name_to_index_[map.name] = map.index; - if (!mappings_.empty()) + // Creating a memory segment mapping in case we are dealing with qubits + if (!value_only) { - map.start = mappings_.back().end; + MemoryMapping map; + map.name = name; + map.index = mappings_.size(); + map.size = size; + + if (name_to_index_.find(map.name) != name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + map.name + " already exists."); + } + + name_to_index_[map.name] = map.index; + if (!mappings_.empty()) + { + map.start = mappings_.back().end; + } + + map.end = map.start + size; + mappings_.emplace_back(std::move(map)); } +} - map.end = map.start + size; - mappings_.emplace_back(std::move(map)); +QubitAllocationManager::Array &QubitAllocationManager::get(String const &name) +{ + auto it = arrays_.find(name); + if (it == arrays_.end()) + { + throw std::runtime_error("Array with name " + name + " does not exists."); + } + return it->second; } QubitAllocationManager::Index QubitAllocationManager::getOffset(String const &name) const diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp index 0f45a71cf0..70bb7e55a2 100644 --- a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp +++ b/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp @@ -1,6 +1,7 @@ #pragma once // Copyright (c) Microsoft Corporation. // Licensed under the MIT License.
+#include "Llvm.hpp" #include #include @@ -16,6 +17,8 @@ class QubitAllocationManager using Index = uint64_t; using String = std::string; using QubitAllocationManagerPtr = std::shared_ptr; + using Array = std::vector; + using Arrays = std::unordered_map; struct MemoryMapping { @@ -30,15 +33,19 @@ class QubitAllocationManager static QubitAllocationManagerPtr createNew(); - void allocate(String &&name, Index &&size); + void allocate(String const &name, Index const &size, bool value_only = false); Index getOffset(String const &name) const; void release(String const &name); + Array &get(String const &name); + private: QubitAllocationManager() = default; NameToIndex name_to_index_; Mappings mappings_; + + Arrays arrays_; }; } // namespace quantum diff --git a/src/Passes/libs/InstructionReplacement/Rule.hpp b/src/Passes/libs/InstructionReplacement/Rule.hpp deleted file mode 100644 index d9bf93d350..0000000000 --- a/src/Passes/libs/InstructionReplacement/Rule.hpp +++ /dev/null @@ -1,42 +0,0 @@ -#pragma once -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -#include "InstructionReplacement/Pattern.hpp" -#include "Llvm.hpp" - -#include -#include - -namespace microsoft { -namespace quantum { -class ReplacementRule -{ -public: - using Captures = OperandPrototype::Captures; - using Instruction = llvm::Instruction; - using Value = llvm::Value; - using OperandPrototypePtr = std::shared_ptr; - using Builder = llvm::IRBuilder<>; - using Replacements = std::vector>; - using ReplaceFunction = std::function; - - /// Rule configuration - /// @{ - void setPattern(OperandPrototypePtr &&pattern); - void setReplacer(ReplaceFunction const &replacer); - /// @} - - /// Operation - /// @{ - bool match(Value *value, Captures &captures) const; - bool replace(Builder &builder, Value *value, Captures &captures, - Replacements &replacements) const; - /// @} -private: - OperandPrototypePtr pattern_{nullptr}; - ReplaceFunction replacer_{nullptr}; -}; - -} // namespace quantum -} // namespace microsoft diff --git a/src/Passes/libs/InstructionReplacement/Pattern.cpp b/src/Passes/src/OperandPrototype.cpp similarity index 86% rename from src/Passes/libs/InstructionReplacement/Pattern.cpp rename to src/Passes/src/OperandPrototype.cpp index b05b9310d2..0e530c0163 100644 --- a/src/Passes/libs/InstructionReplacement/Pattern.cpp +++ b/src/Passes/src/OperandPrototype.cpp @@ -102,6 +102,13 @@ bool CallPattern::match(Value *instr, Captures &captures) const return success(instr, captures); } +CallPattern::Child CallPattern::copy() const +{ + auto ret = std::make_shared(name_); + ret->copyPropertiesFrom(*this); + return std::move(ret); +} + AnyPattern::AnyPattern() = default; AnyPattern::~AnyPattern() = default; bool AnyPattern::match(Value *instr, Captures &captures) const @@ -109,6 +116,11 @@ bool AnyPattern::match(Value *instr, Captures &captures) const return success(instr, captures); } +AnyPattern::Child AnyPattern::copy() const +{ + return std::make_shared(); +} + template InstructionPattern::~InstructionPattern() = default; template @@ -123,6 
+135,14 @@ bool InstructionPattern::match(Value *instr, Captures &captures) const return success(instr, captures); } +template +typename InstructionPattern::Child InstructionPattern::copy() const +{ + auto ret = std::make_shared>(); + ret->copyPropertiesFrom(*this); + return std::move(ret); +} + // TODO(tfr): This seems to be a bug in LLVM. Template instantiations in // a single translation unit is not supposed to reinstantiate across other // translation units. @@ -135,6 +155,7 @@ bool InstructionPattern::match(Value *instr, Captures &captures) const // for more information #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wweak-template-vtables" +template class InstructionPattern; template class InstructionPattern; template class InstructionPattern; #pragma clang diagnostic pop diff --git a/src/Passes/libs/InstructionReplacement/Rule.cpp b/src/Passes/src/ReplacementRule.cpp similarity index 100% rename from src/Passes/libs/InstructionReplacement/Rule.cpp rename to src/Passes/src/ReplacementRule.cpp From 1f35041f813e670b0c030173a14912fffe6875ff Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 9 Aug 2021 07:48:08 +0200 Subject: [PATCH 057/106] Finishing refacto --- src/Passes/CMakeLists.txt | 14 ++++++++++++++ src/Passes/include/Rules/ReplacementRule.hpp | 2 +- src/Passes/libs/CMakeLists.txt | 1 + .../InstructionReplacement.hpp | 4 ++-- src/Passes/src/OperandPrototype.cpp | 2 +- src/Passes/src/ReplacementRule.cpp | 2 +- 6 files changed, 20 insertions(+), 5 deletions(-) diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index f7ccafaa22..c1ce61e892 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -44,6 +44,20 @@ link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/src) +# Top level lib +file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) + +add_library(Passes + SHARED + ${sources}) +target_include_directories( + Passes + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/include" +) +target_link_libraries(Passes + "$<$:-undefined dynamic_lookup>") + # Adding the libraries add_subdirectory(libs) add_subdirectory(tests) \ No newline at end of file diff --git a/src/Passes/include/Rules/ReplacementRule.hpp b/src/Passes/include/Rules/ReplacementRule.hpp index 0c08c1387a..93240aa6da 100644 --- a/src/Passes/include/Rules/ReplacementRule.hpp +++ b/src/Passes/include/Rules/ReplacementRule.hpp @@ -2,8 +2,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "InstructionReplacement/Pattern.hpp" #include "Llvm.hpp" +#include "Rules/OperandPrototype.hpp" #include #include diff --git a/src/Passes/libs/CMakeLists.txt b/src/Passes/libs/CMakeLists.txt index 3cc4850ad9..733b72c1a5 100644 --- a/src/Passes/libs/CMakeLists.txt +++ b/src/Passes/libs/CMakeLists.txt @@ -37,6 +37,7 @@ foreach(pass_plugin ${ALL_PASSES}) # Linking + target_link_libraries(${pass_plugin} Passes) target_link_libraries(${pass_plugin} "$<$:-undefined dynamic_lookup>") diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp index 4186ed200b..f4df4be49d 100644 --- a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp +++ b/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp @@ -2,10 +2,10 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "InstructionReplacement/Pattern.hpp" #include "InstructionReplacement/QubitAllocationManager.hpp" -#include "InstructionReplacement/Rule.hpp" #include "Llvm.hpp" +#include "Rules/OperandPrototype.hpp" +#include "Rules/ReplacementRule.hpp" #include diff --git a/src/Passes/src/OperandPrototype.cpp b/src/Passes/src/OperandPrototype.cpp index 0e530c0163..7c7653da6a 100644 --- a/src/Passes/src/OperandPrototype.cpp +++ b/src/Passes/src/OperandPrototype.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "InstructionReplacement/Pattern.hpp" +#include "Rules/OperandPrototype.hpp" namespace microsoft { namespace quantum { diff --git a/src/Passes/src/ReplacementRule.cpp b/src/Passes/src/ReplacementRule.cpp index 44ca962ee6..c693440d0f 100644 --- a/src/Passes/src/ReplacementRule.cpp +++ b/src/Passes/src/ReplacementRule.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "InstructionReplacement/Rule.hpp" +#include "Rules/ReplacementRule.hpp" namespace microsoft { namespace quantum { From e605c6285cd8da924fb2f813d55a14892d733ce5 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 9 Aug 2021 09:10:17 +0200 Subject: [PATCH 058/106] Updating PoC --- src/Passes/CMakeLists.txt | 12 ++++++++- src/Passes/README.md | 46 ++++++++++++++++++++++++++++++++ src/Passes/include/Llvm.hpp | 5 +++- src/Passes/undefined/adaptor.cpp | 24 +++++++++++++++++ 4 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 src/Passes/undefined/adaptor.cpp diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index c1ce61e892..b0d344ec3a 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -60,4 +60,14 @@ target_link_libraries(Passes # Adding the libraries add_subdirectory(libs) -add_subdirectory(tests) \ No newline at end of file +add_subdirectory(tests) + +add_executable(adaptor undefined/adaptor.cpp) +target_include_directories( + adaptor + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/include" +) +llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) + +target_link_libraries(adaptor ${llvm_libs}) diff --git a/src/Passes/README.md b/src/Passes/README.md index a70a6835a8..b6afba93b5 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -325,3 +325,49 @@ If you forget to instantiate this variable in your corresponding `.cpp` file, ``` everything will compile, but the pass will fail to load. There will be no linking errors either. 
+ +# Notes on QIR Profile Tool (QIR Adaptor Tool) + +Target: + +``` +./qat -profile=base-profile.yml -S file.ir > adapted.ir +``` + +## Loading IR + +https://stackoverflow.com/questions/22239801/how-to-load-llvm-bitcode-file-from-an-ifstream/22241953 + +## Load passes LLVM passes + +https://llvm.org/docs/tutorial/MyFirstLanguageFrontend/LangImpl04.html + +## Load custom passes + +## How to run analysis and transformation + +https://stackoverflow.com/questions/53501830/running-standard-optimization-passes-on-a-llvm-module + +## Profile specification + +```yaml +name: profile-name +displayName: Profile Name +pipeline: + - passName: loopUnroll + - passName: functionInline + - passName: staticQubitAllocation + - passName: staticMemory + - passName: ignoreCall + config: + functionName: +specification: + - passName: requireNoArithmetic + - passName: requireNoStaticAllocation + - passName: requireReducedFunctionsAvailability + config: + functions: + - +``` + +Decent YAML library: https://github.com/jbeder/yaml-cpp diff --git a/src/Passes/include/Llvm.hpp b/src/Passes/include/Llvm.hpp index 80a4728b83..3611bf8cba 100644 --- a/src/Passes/include/Llvm.hpp +++ b/src/Passes/include/Llvm.hpp @@ -41,11 +41,14 @@ #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/LLVMContext.h" -#include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/IR/Verifier.h" +// Reader tool +#include "llvm/IRReader/IRReader.h" +#include "llvm/Support/SourceMgr.h" + #if defined(__clang__) #pragma clang diagnostic pop #endif diff --git a/src/Passes/undefined/adaptor.cpp b/src/Passes/undefined/adaptor.cpp new file mode 100644 index 0000000000..2fff0b74f0 --- /dev/null +++ b/src/Passes/undefined/adaptor.cpp @@ -0,0 +1,24 @@ +#include "Llvm.hpp" + +using namespace llvm; +int main(int /*argc*/, char **argv) +{ + LLVMContext context; + SMDiagnostic error; + auto module = parseIRFile(argv[1], error, context); + if (module) + { + 
ModulePassManager MPM; + FunctionPassManager FPM; + + // InstSimplifyPass is a function pass + FPM.addPass(LoopSimplifyPass()); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + + MPM.run(*module); + // m->print(llvm) + llvm::errs() << *module << "\n"; + } + + return 0; +} From 921482be0a265e494c2e80b913469205dd1aba0b Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 9 Aug 2021 09:46:24 +0200 Subject: [PATCH 059/106] Finishing feasibility --- .../ConstSizeArray/ConstSizeArray.qs | 11 +- .../ConstSizeArray/qir/ConstSizeArray.ll | 100 ++++++++++-------- .../analysis-example.ll | 18 ++-- .../examples/QubitAllocationAnalysis/test.ll | 18 ++-- src/Passes/undefined/adaptor.cpp | 2 +- 5 files changed, 78 insertions(+), 71 deletions(-) diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index 293a4ae6d5..96330ce7a0 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,13 +1,12 @@ -namespace Microsoft.Quantum.Tutorial +namespace Feasibility { open Microsoft.Quantum.Intrinsic; @EntryPoint() - operation TeleportAndReset() : Unit { + operation QubitMapping() : Unit { use qs = Qubit[3]; - let x = [qs[1], qs[0], qs[2]]; - X(x[0]); - X(x[1]); - X(x[2]); + for q in 8..10 { + X(qs[q - 8]); + } } } \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll index 4cfd943124..6e6d17129b 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll @@ -27,22 +27,30 @@ @PartialApplication__11 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void 
(%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] @PartialApplication__12 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] -define internal void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() { +define internal void @Feasibility__QubitMapping__body() { entry: %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %qubit = load %Qubit*, %Qubit** %1, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit) - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %q = phi i64 [ 8, %entry ], [ %4, %exiting__1 ] + %0 = icmp sle i64 %q, 10 + br i1 %0, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %1 = sub i64 %q, 8 + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 %1) %3 = bitcast i8* %2 to %Qubit** - %qubit__1 = load %Qubit*, %Qubit** %3, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit__1) - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %5 = bitcast i8* %4 to %Qubit** - %qubit__2 = load %Qubit*, %Qubit** %5, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit__2) + %qubit = load %Qubit*, 
%Qubit** %3, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %4 = add i64 %q, 1 + br label %header__1 + +exit__1: ; preds = %header__1 call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 -1) call void @__quantum__rt__qubit_release_array(%Array* %qs) ret void @@ -84,6 +92,36 @@ entry: ret { %String* }* %1 } +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + define internal %Tuple* @Microsoft__Quantum__Core__Attribute__body() { entry: ret %Tuple* null @@ -2003,36 +2041,6 @@ entry: ret void } -define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__x__body(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { -entry: - call void @__quantum__qis__x__body(%Qubit* %qubit) - ret void -} - 
-define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - ret void -} - -declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - ret void -} - define internal { %String*, %String* }* @Microsoft__Quantum__Targeting__RequiresCapability__body(%String* %Level, %String* %Reason) { entry: %0 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) @@ -2056,15 +2064,15 @@ entry: ret { %String* }* %1 } -define void @Microsoft__Quantum__Tutorial__TeleportAndReset__Interop() #0 { +define void @Feasibility__QubitMapping__Interop() #0 { entry: - call void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + call void @Feasibility__QubitMapping__body() ret void } -define void @Microsoft__Quantum__Tutorial__TeleportAndReset() #1 { +define void @Feasibility__QubitMapping() #1 { entry: - call void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + call void @Feasibility__QubitMapping__body() ret void } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index f4ea11235b..6ceb4d6776 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll 
@@ -4,7 +4,7 @@ source_filename = "qir/ConstSizeArray.ll" %Array = type opaque %Qubit = type opaque -define internal fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() unnamed_addr { +define internal fastcc void @Feasibility__QubitMapping__body() unnamed_addr { entry: %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 1) @@ -14,12 +14,12 @@ entry: call void @__quantum__qis__x__body(%Qubit* %qubit) %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %3 = bitcast i8* %2 to %Qubit** - %qubit__1 = load %Qubit*, %Qubit** %3, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit__1) + %qubit.1 = load %Qubit*, %Qubit** %3, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit.1) %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %5 = bitcast i8* %4 to %Qubit** - %qubit__2 = load %Qubit*, %Qubit** %5, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit__2) + %qubit.2 = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit.2) call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 -1) call void @__quantum__rt__qubit_release_array(%Array* %qs) ret void @@ -35,15 +35,15 @@ declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr -define void @Microsoft__Quantum__Tutorial__TeleportAndReset__Interop() local_unnamed_addr #0 { +define void @Feasibility__QubitMapping__Interop() local_unnamed_addr #0 { entry: - call fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + call fastcc void @Feasibility__QubitMapping__body() ret void } -define void @Microsoft__Quantum__Tutorial__TeleportAndReset() local_unnamed_addr #1 { +define void @Feasibility__QubitMapping() local_unnamed_addr #1 { entry: - call fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + call 
fastcc void @Feasibility__QubitMapping__body() ret void } diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.ll b/src/Passes/examples/QubitAllocationAnalysis/test.ll index 94e3f72897..b4f2651a62 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/test.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/test.ll @@ -4,14 +4,14 @@ source_filename = "qir/ConstSizeArray.ll" %Qubit = type opaque %Array = type opaque -define internal fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() unnamed_addr { +define internal fastcc void @Feasibility__QubitMapping__body() unnamed_addr { entry: %qubit = inttoptr i64 0 to %Qubit* call void @__quantum__qis__x__body(%Qubit* %qubit) - %qubit__1 = inttoptr i64 1 to %Qubit* - call void @__quantum__qis__x__body(%Qubit* %qubit__1) - %qubit__2 = inttoptr i64 2 to %Qubit* - call void @__quantum__qis__x__body(%Qubit* %qubit__2) + %qubit.1 = inttoptr i64 1 to %Qubit* + call void @__quantum__qis__x__body(%Qubit* %qubit.1) + %qubit.2 = inttoptr i64 2 to %Qubit* + call void @__quantum__qis__x__body(%Qubit* %qubit.2) ret void } @@ -25,15 +25,15 @@ declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr -define void @Microsoft__Quantum__Tutorial__TeleportAndReset__Interop() local_unnamed_addr #0 { +define void @Feasibility__QubitMapping__Interop() local_unnamed_addr #0 { entry: - call fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + call fastcc void @Feasibility__QubitMapping__body() ret void } -define void @Microsoft__Quantum__Tutorial__TeleportAndReset() local_unnamed_addr #1 { +define void @Feasibility__QubitMapping() local_unnamed_addr #1 { entry: - call fastcc void @Microsoft__Quantum__Tutorial__TeleportAndReset__body() + call fastcc void @Feasibility__QubitMapping__body() ret void } diff --git a/src/Passes/undefined/adaptor.cpp b/src/Passes/undefined/adaptor.cpp index 2fff0b74f0..f699311118 100644 --- 
a/src/Passes/undefined/adaptor.cpp +++ b/src/Passes/undefined/adaptor.cpp @@ -15,7 +15,7 @@ int main(int /*argc*/, char **argv) FPM.addPass(LoopSimplifyPass()); MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - MPM.run(*module); + // MPM.run(*module); // m->print(llvm) llvm::errs() << *module << "\n"; } From 1899e41cec3220c6d60f5bb34cc42a680ea22647 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 9 Aug 2021 13:45:09 +0200 Subject: [PATCH 060/106] Improving repository structure and preparing for dual build system support --- src/Passes/{include => Source/LLvm}/Llvm.hpp | 0 src/Passes/{libs => Source/Passes}/CMakeLists.txt | 0 .../Passes}/ExpandStaticAllocation/ExpandStaticAllocation.cpp | 0 .../Passes}/ExpandStaticAllocation/ExpandStaticAllocation.hpp | 0 .../Passes}/ExpandStaticAllocation/LibExpandStaticAllocation.cpp | 0 .../Passes}/ExpandStaticAllocation/SPECIFICATION.md | 0 .../Passes}/InstructionReplacement/InstructionReplacement.cpp | 0 .../Passes}/InstructionReplacement/InstructionReplacement.hpp | 0 .../Passes}/InstructionReplacement/LibInstructionReplacement.cpp | 0 .../Passes}/InstructionReplacement/QubitAllocationManager.cpp | 0 .../Passes}/InstructionReplacement/QubitAllocationManager.hpp | 0 .../Passes}/InstructionReplacement/SPECIFICATION.md | 0 src/Passes/{libs => Source/Passes}/OpsCounter/LibOpsCounter.cpp | 0 src/Passes/{libs => Source/Passes}/OpsCounter/OpsCounter.cpp | 0 src/Passes/{libs => Source/Passes}/OpsCounter/OpsCounter.hpp | 0 src/Passes/{libs => Source/Passes}/OpsCounter/SPECIFICATION.md | 0 .../QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp | 0 .../Passes}/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp | 0 .../Passes}/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp | 0 .../Passes}/QubitAllocationAnalysis/SPECIFICATION.md | 0 src/Passes/{src => Source/Rules}/OperandPrototype.cpp | 0 src/Passes/{include => Source}/Rules/OperandPrototype.hpp | 0 src/Passes/{src => 
Source/Rules}/ReplacementRule.cpp | 0 src/Passes/{include => Source}/Rules/ReplacementRule.hpp | 0 24 files changed, 0 insertions(+), 0 deletions(-) rename src/Passes/{include => Source/LLvm}/Llvm.hpp (100%) rename src/Passes/{libs => Source/Passes}/CMakeLists.txt (100%) rename src/Passes/{libs => Source/Passes}/ExpandStaticAllocation/ExpandStaticAllocation.cpp (100%) rename src/Passes/{libs => Source/Passes}/ExpandStaticAllocation/ExpandStaticAllocation.hpp (100%) rename src/Passes/{libs => Source/Passes}/ExpandStaticAllocation/LibExpandStaticAllocation.cpp (100%) rename src/Passes/{libs => Source/Passes}/ExpandStaticAllocation/SPECIFICATION.md (100%) rename src/Passes/{libs => Source/Passes}/InstructionReplacement/InstructionReplacement.cpp (100%) rename src/Passes/{libs => Source/Passes}/InstructionReplacement/InstructionReplacement.hpp (100%) rename src/Passes/{libs => Source/Passes}/InstructionReplacement/LibInstructionReplacement.cpp (100%) rename src/Passes/{libs => Source/Passes}/InstructionReplacement/QubitAllocationManager.cpp (100%) rename src/Passes/{libs => Source/Passes}/InstructionReplacement/QubitAllocationManager.hpp (100%) rename src/Passes/{libs => Source/Passes}/InstructionReplacement/SPECIFICATION.md (100%) rename src/Passes/{libs => Source/Passes}/OpsCounter/LibOpsCounter.cpp (100%) rename src/Passes/{libs => Source/Passes}/OpsCounter/OpsCounter.cpp (100%) rename src/Passes/{libs => Source/Passes}/OpsCounter/OpsCounter.hpp (100%) rename src/Passes/{libs => Source/Passes}/OpsCounter/SPECIFICATION.md (100%) rename src/Passes/{libs => Source/Passes}/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp (100%) rename src/Passes/{libs => Source/Passes}/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp (100%) rename src/Passes/{libs => Source/Passes}/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp (100%) rename src/Passes/{libs => Source/Passes}/QubitAllocationAnalysis/SPECIFICATION.md (100%) rename src/Passes/{src => 
Source/Rules}/OperandPrototype.cpp (100%) rename src/Passes/{include => Source}/Rules/OperandPrototype.hpp (100%) rename src/Passes/{src => Source/Rules}/ReplacementRule.cpp (100%) rename src/Passes/{include => Source}/Rules/ReplacementRule.hpp (100%) diff --git a/src/Passes/include/Llvm.hpp b/src/Passes/Source/LLvm/Llvm.hpp similarity index 100% rename from src/Passes/include/Llvm.hpp rename to src/Passes/Source/LLvm/Llvm.hpp diff --git a/src/Passes/libs/CMakeLists.txt b/src/Passes/Source/Passes/CMakeLists.txt similarity index 100% rename from src/Passes/libs/CMakeLists.txt rename to src/Passes/Source/Passes/CMakeLists.txt diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp similarity index 100% rename from src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp rename to src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp similarity index 100% rename from src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp rename to src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp diff --git a/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp similarity index 100% rename from src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp rename to src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp diff --git a/src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md b/src/Passes/Source/Passes/ExpandStaticAllocation/SPECIFICATION.md similarity index 100% rename from src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md rename to src/Passes/Source/Passes/ExpandStaticAllocation/SPECIFICATION.md diff --git 
a/src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp similarity index 100% rename from src/Passes/libs/InstructionReplacement/InstructionReplacement.cpp rename to src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp diff --git a/src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp similarity index 100% rename from src/Passes/libs/InstructionReplacement/InstructionReplacement.hpp rename to src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp diff --git a/src/Passes/libs/InstructionReplacement/LibInstructionReplacement.cpp b/src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp similarity index 100% rename from src/Passes/libs/InstructionReplacement/LibInstructionReplacement.cpp rename to src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp b/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp similarity index 100% rename from src/Passes/libs/InstructionReplacement/QubitAllocationManager.cpp rename to src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp diff --git a/src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp b/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp similarity index 100% rename from src/Passes/libs/InstructionReplacement/QubitAllocationManager.hpp rename to src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp diff --git a/src/Passes/libs/InstructionReplacement/SPECIFICATION.md b/src/Passes/Source/Passes/InstructionReplacement/SPECIFICATION.md similarity index 100% rename from src/Passes/libs/InstructionReplacement/SPECIFICATION.md rename to 
src/Passes/Source/Passes/InstructionReplacement/SPECIFICATION.md diff --git a/src/Passes/libs/OpsCounter/LibOpsCounter.cpp b/src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp similarity index 100% rename from src/Passes/libs/OpsCounter/LibOpsCounter.cpp rename to src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp diff --git a/src/Passes/libs/OpsCounter/OpsCounter.cpp b/src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp similarity index 100% rename from src/Passes/libs/OpsCounter/OpsCounter.cpp rename to src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp diff --git a/src/Passes/libs/OpsCounter/OpsCounter.hpp b/src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp similarity index 100% rename from src/Passes/libs/OpsCounter/OpsCounter.hpp rename to src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp diff --git a/src/Passes/libs/OpsCounter/SPECIFICATION.md b/src/Passes/Source/Passes/OpsCounter/SPECIFICATION.md similarity index 100% rename from src/Passes/libs/OpsCounter/SPECIFICATION.md rename to src/Passes/Source/Passes/OpsCounter/SPECIFICATION.md diff --git a/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp similarity index 100% rename from src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp rename to src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp similarity index 100% rename from src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp rename to src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp similarity index 100% rename from 
src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp rename to src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp diff --git a/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md b/src/Passes/Source/Passes/QubitAllocationAnalysis/SPECIFICATION.md similarity index 100% rename from src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md rename to src/Passes/Source/Passes/QubitAllocationAnalysis/SPECIFICATION.md diff --git a/src/Passes/src/OperandPrototype.cpp b/src/Passes/Source/Rules/OperandPrototype.cpp similarity index 100% rename from src/Passes/src/OperandPrototype.cpp rename to src/Passes/Source/Rules/OperandPrototype.cpp diff --git a/src/Passes/include/Rules/OperandPrototype.hpp b/src/Passes/Source/Rules/OperandPrototype.hpp similarity index 100% rename from src/Passes/include/Rules/OperandPrototype.hpp rename to src/Passes/Source/Rules/OperandPrototype.hpp diff --git a/src/Passes/src/ReplacementRule.cpp b/src/Passes/Source/Rules/ReplacementRule.cpp similarity index 100% rename from src/Passes/src/ReplacementRule.cpp rename to src/Passes/Source/Rules/ReplacementRule.cpp diff --git a/src/Passes/include/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp similarity index 100% rename from src/Passes/include/Rules/ReplacementRule.hpp rename to src/Passes/Source/Rules/ReplacementRule.hpp From 17bbf79f84a1c168e043733354c86089515a9469 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 9 Aug 2021 13:45:57 +0200 Subject: [PATCH 061/106] Adding QAT commandline template --- src/Passes/{undefined/adaptor.cpp => Source/Apps/Qat/Qat.cpp} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/Passes/{undefined/adaptor.cpp => Source/Apps/Qat/Qat.cpp} (100%) diff --git a/src/Passes/undefined/adaptor.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp similarity index 100% rename from src/Passes/undefined/adaptor.cpp rename to src/Passes/Source/Apps/Qat/Qat.cpp From 946b58d90a4f124bc9c720f0a5f2dcc291a00695 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 9 Aug 2021 14:04:08 +0200 Subject: [PATCH 062/106] Restructuring --- src/Passes/CMakeLists.txt | 29 +- src/Passes/Source/Apps/CMakeLists.txt | 3 + src/Passes/Source/Apps/Qat/Qat.cpp | 2 +- src/Passes/Source/CMakeLists.txt | 24 ++ src/Passes/Source/Passes/CMakeLists.txt | 2 +- .../ExpandStaticAllocation.cpp | 386 +++++++++--------- .../ExpandStaticAllocation.hpp | 81 ++-- .../LibExpandStaticAllocation.cpp | 45 +- .../ExpandStaticAllocation/SPECIFICATION.md | 1 - .../InstructionReplacement.cpp | 4 +- .../InstructionReplacement.hpp | 4 +- .../LibInstructionReplacement.cpp | 11 +- .../QubitAllocationManager.cpp | 2 +- .../QubitAllocationManager.hpp | 2 +- .../InstructionReplacement/SPECIFICATION.md | 1 - .../Passes/OpsCounter/LibOpsCounter.cpp | 65 ++- .../Source/Passes/OpsCounter/OpsCounter.cpp | 4 +- .../Source/Passes/OpsCounter/OpsCounter.hpp | 108 +++-- .../Source/Passes/OpsCounter/SPECIFICATION.md | 0 .../LibQubitAllocationAnalysis.cpp | 66 ++- .../QubitAllocationAnalysis.cpp | 4 +- .../QubitAllocationAnalysis.hpp | 209 +++++----- .../QubitAllocationAnalysis/SPECIFICATION.md | 9 - src/Passes/Source/Rules/OperandPrototype.hpp | 2 +- src/Passes/Source/Rules/ReplacementRule.hpp | 2 +- 25 files changed, 518 insertions(+), 548 deletions(-) create mode 100644 src/Passes/Source/Apps/CMakeLists.txt create mode 100644 src/Passes/Source/CMakeLists.txt delete mode 100644 
src/Passes/Source/Passes/ExpandStaticAllocation/SPECIFICATION.md delete mode 100644 src/Passes/Source/Passes/InstructionReplacement/SPECIFICATION.md delete mode 100644 src/Passes/Source/Passes/OpsCounter/SPECIFICATION.md delete mode 100644 src/Passes/Source/Passes/QubitAllocationAnalysis/SPECIFICATION.md diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index b0d344ec3a..76aed4fe10 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.4.3) -project(QSharpPasses) +project(QirPasses) find_package(LLVM REQUIRED CONFIG) include(CheckCXXCompilerFlag) @@ -42,32 +42,11 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) include_directories(${LLVM_INCLUDE_DIRS}) link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Top level lib -file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +include_directories(${CMAKE_SOURCE_DIR}/Source) +llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) -add_library(Passes - SHARED - ${sources}) -target_include_directories( - Passes - PRIVATE - "${CMAKE_CURRENT_SOURCE_DIR}/include" -) -target_link_libraries(Passes - "$<$:-undefined dynamic_lookup>") # Adding the libraries -add_subdirectory(libs) +add_subdirectory(Source) add_subdirectory(tests) -add_executable(adaptor undefined/adaptor.cpp) -target_include_directories( - adaptor - PRIVATE - "${CMAKE_CURRENT_SOURCE_DIR}/include" -) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) - -target_link_libraries(adaptor ${llvm_libs}) diff --git a/src/Passes/Source/Apps/CMakeLists.txt b/src/Passes/Source/Apps/CMakeLists.txt new file mode 100644 index 0000000000..3d4d5dcc03 --- 
/dev/null +++ b/src/Passes/Source/Apps/CMakeLists.txt @@ -0,0 +1,3 @@ +add_executable(qat Qat/Qat.cpp) + +target_link_libraries(qat ${llvm_libs}) diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index f699311118..aea054daba 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -1,4 +1,4 @@ -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" using namespace llvm; int main(int /*argc*/, char **argv) diff --git a/src/Passes/Source/CMakeLists.txt b/src/Passes/Source/CMakeLists.txt new file mode 100644 index 0000000000..e97953f783 --- /dev/null +++ b/src/Passes/Source/CMakeLists.txt @@ -0,0 +1,24 @@ +cmake_minimum_required(VERSION 3.4.3) + +# Creating the rules library +file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/*.cpp) + +add_library(Rules + SHARED + ${sources}) + +target_include_directories( + Rules + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/include" +) + +target_link_libraries(Rules + "$<$:-undefined dynamic_lookup>") + + +# Creating all of the passes library +add_subdirectory(Passes) + +# Creating all commandline apps +add_subdirectory(Apps) \ No newline at end of file diff --git a/src/Passes/Source/Passes/CMakeLists.txt b/src/Passes/Source/Passes/CMakeLists.txt index 733b72c1a5..e9529fec7c 100644 --- a/src/Passes/Source/Passes/CMakeLists.txt +++ b/src/Passes/Source/Passes/CMakeLists.txt @@ -37,7 +37,7 @@ foreach(pass_plugin ${ALL_PASSES}) # Linking - target_link_libraries(${pass_plugin} Passes) + target_link_libraries(${pass_plugin} Rules) target_link_libraries(${pass_plugin} "$<$:-undefined dynamic_lookup>") diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp index 5684864d7a..7c913b76b5 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp +++ 
b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -1,227 +1,223 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" +#include "Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp" -#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" +#include "Llvm/Llvm.hpp" #include #include -namespace microsoft +namespace microsoft { +namespace quantum { +llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) { -namespace quantum -{ - llvm::PreservedAnalyses ExpandStaticAllocationPass::run( - llvm::Function& function, - llvm::FunctionAnalysisManager& fam) + // Pass body + for (auto &basic_block : function) + { + // Keeping track of instructions to remove in each block + std::vector to_remove; + + for (auto &instruction : basic_block) { - // Pass body - for (auto& basic_block : function) + // Finding calls + auto *call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + continue; + } + + ConstantArguments argument_constants{}; + std::vector remaining_arguments{}; + + auto callee_function = call_instr->getCalledFunction(); + auto &depenency_graph = fam.getResult(*callee_function); + + if (depenency_graph.size() > 0) + { + uint32_t idx = 0; + auto n = static_cast(callee_function->arg_size()); + + // Finding argument constants + while (idx < n) { - // Keeping track of instructions to remove in each block - std::vector to_remove; + auto arg = callee_function->getArg(idx); + auto value = call_instr->getArgOperand(idx); + + auto cst = llvm::dyn_cast(value); + if (cst != nullptr) + { + argument_constants[arg->getName().str()] = cst; + } + else + { + remaining_arguments.push_back(idx); + } + + ++idx; + } - for (auto& instruction : basic_block) - { - // Finding calls - auto* call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - continue; - } - - ConstantArguments argument_constants{}; - 
std::vector remaining_arguments{}; - - auto callee_function = call_instr->getCalledFunction(); - auto& depenency_graph = fam.getResult(*callee_function); - - if (depenency_graph.size() > 0) - { - uint32_t idx = 0; - auto n = static_cast(callee_function->arg_size()); - - // Finding argument constants - while (idx < n) - { - auto arg = callee_function->getArg(idx); - auto value = call_instr->getArgOperand(idx); - - auto cst = llvm::dyn_cast(value); - if (cst != nullptr) - { - argument_constants[arg->getName().str()] = cst; - } - else - { - remaining_arguments.push_back(idx); - } - - ++idx; - } - - // Checking which arrays are constant for this - auto new_callee = expandFunctionCall(depenency_graph, *callee_function, argument_constants); - - // Replacing call if a new function was created - if (new_callee != nullptr) - { - llvm::IRBuilder<> builder(call_instr); - (void)call_instr; - - // List with new call arguments - std::vector new_arguments; - for (auto const& i : remaining_arguments) - { - // Getting the i'th argument - llvm::Value* arg = call_instr->getArgOperand(i); - - // Adding arguments that were not constant - if (argument_constants.find(arg->getName().str()) == argument_constants.end()) - { - new_arguments.push_back(arg); - } - } - - // Creating a new call - llvm::Value* new_call = builder.CreateCall(new_callee, new_arguments); - - // Replace all calls to old function with calls to new function - for (auto& use : call_instr->uses()) - { - llvm::User* user = use.getUser(); - user->setOperand(use.getOperandNo(), new_call); - } - - // Schedule original instruction for deletion - to_remove.push_back(&instruction); - } - } - } + // Checking which arrays are constant for this + auto new_callee = expandFunctionCall(depenency_graph, *callee_function, argument_constants); - // Removing instructions - for (auto& instruction : to_remove) + // Replacing call if a new function was created + if (new_callee != nullptr) + { + llvm::IRBuilder<> builder(call_instr); + 
(void)call_instr; + + // List with new call arguments + std::vector new_arguments; + for (auto const &i : remaining_arguments) + { + // Getting the i'th argument + llvm::Value *arg = call_instr->getArgOperand(i); + + // Adding arguments that were not constant + if (argument_constants.find(arg->getName().str()) == argument_constants.end()) { - if (!instruction->use_empty()) - { - instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); - } - instruction->eraseFromParent(); + new_arguments.push_back(arg); } - } + } + + // Creating a new call + llvm::Value *new_call = builder.CreateCall(new_callee, new_arguments); + + // Replace all calls to old function with calls to new function + for (auto &use : call_instr->uses()) + { + llvm::User *user = use.getUser(); + user->setOperand(use.getOperandNo(), new_call); + } - return llvm::PreservedAnalyses::none(); + // Schedule original instruction for deletion + to_remove.push_back(&instruction); + } + } } - llvm::Function* ExpandStaticAllocationPass::expandFunctionCall( - QubitAllocationResult const& depenency_graph, - llvm::Function& callee, - ConstantArguments const& const_args) + // Removing instructions + for (auto &instruction : to_remove) { - bool should_replace_function = false; - if (!depenency_graph.empty()) - { - // Checking that any of all allocations in the function - // body becomes static from replacing constant function arguments - for (auto const& allocation : depenency_graph) - { - // Ignoring non-static allocations - if (!allocation.is_possibly_static) - { - continue; - } - - // Ignoring trivial allocations - if (allocation.depends_on.empty()) - { - continue; - } - - // Checking all dependencies are constant - bool all_const = true; - for (auto& name : allocation.depends_on) - { - all_const = all_const && (const_args.find(name) != const_args.end()); - } - - // In case that all dependencies are constant for this - // allocation, we should replace the function with one where - // the 
arguments are eliminated. - if (all_const) - { - should_replace_function = true; - } - } - } + if (!instruction->use_empty()) + { + instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); + } + instruction->eraseFromParent(); + } + } - // Replacing function if needed - if (should_replace_function) - { - auto module = callee.getParent(); - auto& context = module->getContext(); - llvm::IRBuilder<> builder(context); - - // Copying the original function - llvm::ValueToValueMapTy remapper; - std::vector arg_types; - - // The user might be deleting arguments to the function by specifying them in - // the VMap. If so, we need to not add the arguments to the arg ty vector - // - for (auto const& arg : callee.args()) - { - // Skipping constant arguments + return llvm::PreservedAnalyses::none(); +} - if (const_args.find(arg.getName().str()) != const_args.end()) - { - continue; - } +llvm::Function *ExpandStaticAllocationPass::expandFunctionCall( + QubitAllocationResult const &depenency_graph, llvm::Function &callee, + ConstantArguments const &const_args) +{ + bool should_replace_function = false; + if (!depenency_graph.empty()) + { + // Checking that any of all allocations in the function + // body becomes static from replacing constant function arguments + for (auto const &allocation : depenency_graph) + { + // Ignoring non-static allocations + if (!allocation.is_possibly_static) + { + continue; + } + + // Ignoring trivial allocations + if (allocation.depends_on.empty()) + { + continue; + } + + // Checking all dependencies are constant + bool all_const = true; + for (auto &name : allocation.depends_on) + { + all_const = all_const && (const_args.find(name) != const_args.end()); + } + + // In case that all dependencies are constant for this + // allocation, we should replace the function with one where + // the arguments are eliminated. 
+ if (all_const) + { + should_replace_function = true; + } + } + } + + // Replacing function if needed + if (should_replace_function) + { + auto module = callee.getParent(); + auto & context = module->getContext(); + llvm::IRBuilder<> builder(context); + + // Copying the original function + llvm::ValueToValueMapTy remapper; + std::vector arg_types; + + // The user might be deleting arguments to the function by specifying them in + // the VMap. If so, we need to not add the arguments to the arg ty vector + // + for (auto const &arg : callee.args()) + { + // Skipping constant arguments - arg_types.push_back(arg.getType()); - } + if (const_args.find(arg.getName().str()) != const_args.end()) + { + continue; + } - // Creating a new function - llvm::FunctionType* function_type = llvm::FunctionType::get( - callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); - auto function = llvm::Function::Create( - function_type, callee.getLinkage(), callee.getAddressSpace(), callee.getName(), module); + arg_types.push_back(arg.getType()); + } - // Copying the non-const arguments - auto dest_args_it = function->arg_begin(); + // Creating a new function + llvm::FunctionType *function_type = llvm::FunctionType::get( + callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); + auto function = llvm::Function::Create(function_type, callee.getLinkage(), + callee.getAddressSpace(), callee.getName(), module); - for (auto const& arg : callee.args()) - { - auto const_it = const_args.find(arg.getName().str()); - if (const_it == const_args.end()) - { - // Mapping remaining function arguments - dest_args_it->setName(arg.getName()); - remapper[&arg] = &*dest_args_it++; - } - else - { - remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); - } - } + // Copying the non-const arguments + auto dest_args_it = function->arg_begin(); - llvm::SmallVector returns; // Ignore returns cloned. 
+ for (auto const &arg : callee.args()) + { + auto const_it = const_args.find(arg.getName().str()); + if (const_it == const_args.end()) + { + // Mapping remaining function arguments + dest_args_it->setName(arg.getName()); + remapper[&arg] = &*dest_args_it++; + } + else + { + remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); + } + } - // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' - llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); + llvm::SmallVector returns; // Ignore returns cloned. - verifyFunction(*function); + // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' + llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); - return function; - } + verifyFunction(*function); - return nullptr; - } + return function; + } - bool ExpandStaticAllocationPass::isRequired() - { - return true; - } + return nullptr; +} + +bool ExpandStaticAllocationPass::isRequired() +{ + return true; +} -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp index fbee619be2..8803026292 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -2,50 +2,45 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm.hpp" - -#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Llvm/Llvm.hpp" +#include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include -namespace microsoft -{ -namespace quantum -{ +namespace microsoft { +namespace quantum { - class ExpandStaticAllocationPass : public llvm::PassInfoMixin - { - public: - using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; - using ConstantArguments = std::unordered_map; - - /// Constructors and destructors - /// @{ - ExpandStaticAllocationPass() = default; - ExpandStaticAllocationPass(ExpandStaticAllocationPass const&) = default; - ExpandStaticAllocationPass(ExpandStaticAllocationPass&&) = default; - ~ExpandStaticAllocationPass() = default; - /// @} - - /// Operators - /// @{ - ExpandStaticAllocationPass& operator=(ExpandStaticAllocationPass const&) = default; - ExpandStaticAllocationPass& operator=(ExpandStaticAllocationPass&&) = default; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - - /// @{ - llvm::Function* expandFunctionCall( - QubitAllocationResult const& depenency_graph, - llvm::Function& callee, - ConstantArguments const& const_args); - /// @} - }; - -} // namespace quantum -} // namespace microsoft +class ExpandStaticAllocationPass : public llvm::PassInfoMixin +{ +public: + using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; + using ConstantArguments = std::unordered_map; + + /// Constructors and destructors + /// @{ + ExpandStaticAllocationPass() = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass const &) = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass &&) = default; + ~ExpandStaticAllocationPass() = default; + /// @} + + /// Operators + /// @{ + ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass const &) = default; + 
ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass &&) = default; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} + + /// @{ + llvm::Function *expandFunctionCall(QubitAllocationResult const &depenency_graph, + llvm::Function &callee, ConstantArguments const &const_args); + /// @} +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp index e73a64b7d8..b4225f154a 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp @@ -1,42 +1,37 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" - -#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" +#include "Llvm/Llvm.hpp" +#include "Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp" #include #include -namespace -{ +namespace { llvm::PassPluginLibraryInfo getExpandStaticAllocationPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; + using namespace microsoft::quantum; + using namespace llvm; - return { - LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, - [](PassBuilder& pb) - { - // Registering the pass - pb.registerPipelineParsingCallback( - [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) - { - if (name == "expand-static-allocation") - { - fpm.addPass(ExpandStaticAllocationPass()); - return true; - } + return { + LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == 
"expand-static-allocation") + { + fpm.addPass(ExpandStaticAllocationPass()); + return true; + } - return false; - }); - }}; + return false; + }); + }}; } -} // namespace +} // namespace // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getExpandStaticAllocationPluginInfo(); + return getExpandStaticAllocationPluginInfo(); } diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/SPECIFICATION.md b/src/Passes/Source/Passes/ExpandStaticAllocation/SPECIFICATION.md deleted file mode 100644 index 5095eea8b3..0000000000 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/SPECIFICATION.md +++ /dev/null @@ -1 +0,0 @@ -# {ExpandStaticAllocation} Specification diff --git a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp index 0b8026a19e..bb826121cd 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp +++ b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp @@ -1,9 +1,9 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "InstructionReplacement/InstructionReplacement.hpp" +#include "Passes/InstructionReplacement/InstructionReplacement.hpp" -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" #include #include diff --git a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp index f4df4be49d..8f491bfd0b 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp +++ b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp @@ -2,8 +2,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "InstructionReplacement/QubitAllocationManager.hpp" -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" +#include "Passes/InstructionReplacement/QubitAllocationManager.hpp" #include "Rules/OperandPrototype.hpp" #include "Rules/ReplacementRule.hpp" diff --git a/src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp b/src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp index a2c14df13e..77c2aa08ec 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp +++ b/src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp @@ -1,9 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "InstructionReplacement/InstructionReplacement.hpp" - -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" +#include "Passes/InstructionReplacement/InstructionReplacement.hpp" #include #include @@ -13,7 +12,7 @@ llvm::PassPluginLibraryInfo getInstructionReplacementPluginInfo() { using namespace microsoft::quantum; using namespace llvm; - + return { LLVM_PLUGIN_API_VERSION, "InstructionReplacement", LLVM_VERSION_STRING, [](PassBuilder &pb) { // Registering the pass @@ -28,8 +27,8 @@ llvm::PassPluginLibraryInfo getInstructionReplacementPluginInfo() return false; }); }}; -} -} // namespace +} +} // namespace // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() diff --git a/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp b/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp index e22b48176f..ca769fd9df 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp +++ b/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "InstructionReplacement/QubitAllocationManager.hpp" +#include "Passes/InstructionReplacement/QubitAllocationManager.hpp" #include #include diff --git a/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp b/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp index 70bb7e55a2..10d6e9fa74 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp +++ b/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp @@ -1,7 +1,7 @@ #pragma once // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" #include #include diff --git a/src/Passes/Source/Passes/InstructionReplacement/SPECIFICATION.md b/src/Passes/Source/Passes/InstructionReplacement/SPECIFICATION.md deleted file mode 100644 index 76db543de7..0000000000 --- a/src/Passes/Source/Passes/InstructionReplacement/SPECIFICATION.md +++ /dev/null @@ -1 +0,0 @@ -# {InstructionReplacement} Specification diff --git a/src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp b/src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp index 65a7a238b9..d3de240e11 100644 --- a/src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp +++ b/src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp @@ -1,50 +1,47 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm.hpp" - -#include "OpsCounter/OpsCounter.hpp" +#include "Llvm/Llvm.hpp" +#include "Passes/OpsCounter/OpsCounter.hpp" #include #include -namespace -{ +namespace { // Interface to plugin llvm::PassPluginLibraryInfo getOpsCounterPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, - [](PassBuilder& pb) - { - // Registering the printer - pb.registerPipelineParsingCallback( - [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) - { - if (name == "print") - { - fpm.addPass(OpsCounterPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) - { fpm.addPass(OpsCounterPrinter(llvm::errs())); }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) - { fam.registerPass([] { return OpsCounterAnalytics(); }); }); - }}; + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the printer + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "print") + { + fpm.addPass(OpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(OpsCounterPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + fam.registerPass([] { return OpsCounterAnalytics(); }); + }); + }}; } -} // namespace +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getOpsCounterPluginInfo(); + return 
getOpsCounterPluginInfo(); } diff --git a/src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp b/src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp index 56ae30bf9f..154368bf21 100644 --- a/src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp +++ b/src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp @@ -1,9 +1,9 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "OpsCounter/OpsCounter.hpp" +#include "Passes/OpsCounter/OpsCounter.hpp" -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" #include #include diff --git a/src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp b/src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp index 09f73362f1..b385d0cb01 100644 --- a/src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp +++ b/src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp @@ -2,68 +2,66 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" -namespace microsoft -{ -namespace quantum -{ +namespace microsoft { +namespace quantum { - class OpsCounterAnalytics : public llvm::AnalysisInfoMixin - { - public: - using Result = llvm::StringMap; +class OpsCounterAnalytics : public llvm::AnalysisInfoMixin +{ +public: + using Result = llvm::StringMap; - /// Constructors and destructors - /// @{ - OpsCounterAnalytics() = default; - OpsCounterAnalytics(OpsCounterAnalytics const&) = delete; - OpsCounterAnalytics(OpsCounterAnalytics&&) = default; - ~OpsCounterAnalytics() = default; - /// @} + /// Constructors and destructors + /// @{ + OpsCounterAnalytics() = default; + OpsCounterAnalytics(OpsCounterAnalytics const &) = delete; + OpsCounterAnalytics(OpsCounterAnalytics &&) = default; + ~OpsCounterAnalytics() = default; + /// @} - /// Operators - /// @{ - OpsCounterAnalytics& operator=(OpsCounterAnalytics const&) = delete; - OpsCounterAnalytics& operator=(OpsCounterAnalytics&&) = delete; - /// @} + /// Operators + /// @{ + OpsCounterAnalytics &operator=(OpsCounterAnalytics const &) = delete; + 
OpsCounterAnalytics &operator=(OpsCounterAnalytics &&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); - /// @} + /// Functions required by LLVM + /// @{ + Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); + /// @} - private: - static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; - }; +private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; +}; - class OpsCounterPrinter : public llvm::PassInfoMixin - { - public: - /// Constructors and destructors - /// @{ - explicit OpsCounterPrinter(llvm::raw_ostream& out_stream); - OpsCounterPrinter() = delete; - OpsCounterPrinter(OpsCounterPrinter const&) = delete; - OpsCounterPrinter(OpsCounterPrinter&&) = default; - ~OpsCounterPrinter() = default; - /// @} +class OpsCounterPrinter : public llvm::PassInfoMixin +{ +public: + /// Constructors and destructors + /// @{ + explicit OpsCounterPrinter(llvm::raw_ostream &out_stream); + OpsCounterPrinter() = delete; + OpsCounterPrinter(OpsCounterPrinter const &) = delete; + OpsCounterPrinter(OpsCounterPrinter &&) = default; + ~OpsCounterPrinter() = default; + /// @} - /// Operators - /// @{ - OpsCounterPrinter& operator=(OpsCounterPrinter const&) = delete; - OpsCounterPrinter& operator=(OpsCounterPrinter&&) = delete; - /// @} + /// Operators + /// @{ + OpsCounterPrinter &operator=(OpsCounterPrinter const &) = delete; + OpsCounterPrinter &operator=(OpsCounterPrinter &&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - private: - llvm::raw_ostream& out_stream_; - }; + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} +private: + 
llvm::raw_ostream &out_stream_; +}; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/OpsCounter/SPECIFICATION.md b/src/Passes/Source/Passes/OpsCounter/SPECIFICATION.md deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp index ac03bc1f41..3cd8afbd9e 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp @@ -1,51 +1,47 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" - -#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Llvm/Llvm.hpp" +#include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include #include -namespace -{ +namespace { // Interface to plugin llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, - [](PassBuilder& pb) - { - // Registering a printer for the anaylsis - pb.registerPipelineParsingCallback( - [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) - { - if (name == "print") - { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) - { fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback( - [](FunctionAnalysisManager& fam) - { fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); }); - }}; + using namespace microsoft::quantum; 
+ using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering a printer for the anaylsis + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "print") + { + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); + }); + }}; } -} // namespace +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getQubitAllocationAnalysisPluginInfo(); + return getQubitAllocationAnalysisPluginInfo(); } diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index b24a10927d..02262ad21d 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -1,9 +1,9 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" #include #include diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index 215388be56..68de055bed 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp +++ b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -2,114 +2,113 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" #include #include -namespace microsoft +namespace microsoft { +namespace quantum { + +class QubitAllocationAnalysisAnalytics + : public llvm::AnalysisInfoMixin { -namespace quantum +public: + using String = std::string; + using ArgList = std::unordered_set; + + struct QubitArray + { + bool is_possibly_static{false}; ///< Indicates whether the array is + /// possibly static or not + /// + String variable_name{}; ///< Name of the qubit array + ArgList depends_on{}; ///< Function arguments that + /// determines if it is constant or not + /// + uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. 
+ }; + + using Value = llvm::Value; + using DependencyGraph = std::unordered_map; + using ValueDependencyGraph = std::unordered_map; + + using Instruction = llvm::Instruction; + using Function = llvm::Function; + using Result = std::vector; + + /// Constructors and destructors + /// @{ + QubitAllocationAnalysisAnalytics() = default; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const &) = delete; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics &&) = default; + ~QubitAllocationAnalysisAnalytics() = default; + /// @} + + /// Operators + /// @{ + QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics const &) = delete; + QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); + /// @} + + /// Function analysis + /// @{ + void analyseFunction(llvm::Function &function); + /// @} + + /// Instruction analysis + /// @{ + bool operandsConstant(Instruction const &instruction) const; + void markPossibleConstant(Instruction &instruction); + void analyseCall(Instruction &instruction); + /// @} + +private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; + + /// Analysis details + /// @{ + ValueDependencyGraph constantness_dependencies_{}; + /// @} + + /// Result + /// @{ + Result results_{}; + /// @} +}; + +class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin { - - class QubitAllocationAnalysisAnalytics : public llvm::AnalysisInfoMixin - { - public: - using String = std::string; - using ArgList = std::unordered_set; - - struct QubitArray - { - bool is_possibly_static{false}; ///< Indicates whether the array is - /// possibly static or not - /// - String variable_name{}; ///< Name of the qubit array - ArgList depends_on{}; ///< Function arguments that - /// determines if it is constant or not - /// - 
uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. - }; - - using Value = llvm::Value; - using DependencyGraph = std::unordered_map; - using ValueDependencyGraph = std::unordered_map; - - using Instruction = llvm::Instruction; - using Function = llvm::Function; - using Result = std::vector; - - /// Constructors and destructors - /// @{ - QubitAllocationAnalysisAnalytics() = default; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const&) = delete; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics&&) = default; - ~QubitAllocationAnalysisAnalytics() = default; - /// @} - - /// Operators - /// @{ - QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics const&) = delete; - QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics&&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); - /// @} - - /// Function analysis - /// @{ - void analyseFunction(llvm::Function& function); - /// @} - - /// Instruction analysis - /// @{ - bool operandsConstant(Instruction const& instruction) const; - void markPossibleConstant(Instruction& instruction); - void analyseCall(Instruction& instruction); - /// @} - - private: - static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; - - /// Analysis details - /// @{ - ValueDependencyGraph constantness_dependencies_{}; - /// @} - - /// Result - /// @{ - Result results_{}; - /// @} - }; - - class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin - { - public: - /// Constructors and destructors - /// @{ - explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream); - QubitAllocationAnalysisPrinter() = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const&) = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter&&) = default; - ~QubitAllocationAnalysisPrinter() = 
default; - /// @} - - /// Operators - /// @{ - QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter const&) = delete; - QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter&&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - private: - llvm::raw_ostream& out_stream_; - }; - -} // namespace quantum -} // namespace microsoft +public: + /// Constructors and destructors + /// @{ + explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream); + QubitAllocationAnalysisPrinter() = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const &) = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter &&) = default; + ~QubitAllocationAnalysisPrinter() = default; + /// @} + + /// Operators + /// @{ + QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter const &) = delete; + QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} +private: + llvm::raw_ostream &out_stream_; +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/SPECIFICATION.md b/src/Passes/Source/Passes/QubitAllocationAnalysis/SPECIFICATION.md deleted file mode 100644 index 4c2781d605..0000000000 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/SPECIFICATION.md +++ /dev/null @@ -1,9 +0,0 @@ -# Qubit Allocation Analysis - -## Purpose - -The purpose of this pass is to analyse the code for qubit allocations and identify -the allocation dependency. This helps subsequent transfomation passes expand the code -to, for instance, eliminate loops and classical logic. 
This is desirable as the control -logic for some quantum computing systems may be limited and one may therefore wish -to reduce its complexity as much as possible at compile time. diff --git a/src/Passes/Source/Rules/OperandPrototype.hpp b/src/Passes/Source/Rules/OperandPrototype.hpp index 43e1c7b296..853b16d107 100644 --- a/src/Passes/Source/Rules/OperandPrototype.hpp +++ b/src/Passes/Source/Rules/OperandPrototype.hpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" #include #include diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index 93240aa6da..0408ef5023 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" +#include "Llvm/Llvm.hpp" #include "Rules/OperandPrototype.hpp" #include From b5b40fbf97e6f47994a2a64287e9b616255abaa8 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 9 Aug 2021 15:38:58 +0200 Subject: [PATCH 063/106] Finalising refactoring phase 1 --- .../AllocationManager.cpp} | 16 +- .../AllocationManager.hpp} | 16 +- src/Passes/Source/CMakeLists.txt | 20 +- .../InstructionReplacement.cpp | 202 +---------------- .../InstructionReplacement.hpp | 23 +- src/Passes/Source/Rules/RuleSet.cpp | 214 ++++++++++++++++++ src/Passes/Source/Rules/RuleSet.hpp | 44 ++++ .../examples/ClassicalIrCommandline/Makefile | 2 +- .../examples/QubitAllocationAnalysis/Makefile | 8 +- 9 files changed, 307 insertions(+), 238 deletions(-) rename src/Passes/Source/{Passes/InstructionReplacement/QubitAllocationManager.cpp => AllocationManager/AllocationManager.cpp} (73%) rename src/Passes/Source/{Passes/InstructionReplacement/QubitAllocationManager.hpp => AllocationManager/AllocationManager.hpp} (69%) create mode 100644 src/Passes/Source/Rules/RuleSet.cpp create mode 100644 src/Passes/Source/Rules/RuleSet.hpp diff --git a/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp b/src/Passes/Source/AllocationManager/AllocationManager.cpp similarity index 73% rename from src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp rename to src/Passes/Source/AllocationManager/AllocationManager.cpp index ca769fd9df..1d1bfe9c2f 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.cpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Passes/InstructionReplacement/QubitAllocationManager.hpp" +#include "AllocationManager/AllocationManager.hpp" #include #include @@ -10,15 +10,15 @@ namespace microsoft { namespace quantum { -QubitAllocationManager::QubitAllocationManagerPtr QubitAllocationManager::createNew() +AllocationManager::AllocationManagerPtr AllocationManager::createNew() { - QubitAllocationManagerPtr ret; - ret.reset(new QubitAllocationManager()); + AllocationManagerPtr ret; + ret.reset(new AllocationManager()); return ret; } -void QubitAllocationManager::allocate(String const &name, Index const &size, bool value_only) +void AllocationManager::allocate(String const &name, Index const &size, bool value_only) { // Creating an array to store values // llvm::errs() << "Allocating " << name << " " << size << "\n"; @@ -57,7 +57,7 @@ void QubitAllocationManager::allocate(String const &name, Index const &size, boo } } -QubitAllocationManager::Array &QubitAllocationManager::get(String const &name) +AllocationManager::Array &AllocationManager::get(String const &name) { auto it = arrays_.find(name); if (it == arrays_.end()) @@ -67,7 +67,7 @@ QubitAllocationManager::Array &QubitAllocationManager::get(String const &name) return it->second; } -QubitAllocationManager::Index QubitAllocationManager::getOffset(String const &name) const +AllocationManager::Index AllocationManager::getOffset(String const &name) const { auto it = name_to_index_.find(name); if (it == name_to_index_.end()) @@ -79,7 +79,7 @@ QubitAllocationManager::Index QubitAllocationManager::getOffset(String const &na return mappings_[index].start; } -void QubitAllocationManager::release(String const & /*name*/) +void AllocationManager::release(String const & /*name*/) {} } // namespace quantum diff --git a/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp b/src/Passes/Source/AllocationManager/AllocationManager.hpp similarity index 69% rename from 
src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp rename to src/Passes/Source/AllocationManager/AllocationManager.hpp index 10d6e9fa74..3a330588bb 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/QubitAllocationManager.hpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.hpp @@ -11,14 +11,14 @@ namespace microsoft { namespace quantum { -class QubitAllocationManager +class AllocationManager { public: - using Index = uint64_t; - using String = std::string; - using QubitAllocationManagerPtr = std::shared_ptr; - using Array = std::vector; - using Arrays = std::unordered_map; + using Index = uint64_t; + using String = std::string; + using AllocationManagerPtr = std::shared_ptr; + using Array = std::vector; + using Arrays = std::unordered_map; struct MemoryMapping { @@ -31,7 +31,7 @@ class QubitAllocationManager using NameToIndex = std::unordered_map; using Mappings = std::vector; - static QubitAllocationManagerPtr createNew(); + static AllocationManagerPtr createNew(); void allocate(String const &name, Index const &size, bool value_only = false); Index getOffset(String const &name) const; @@ -40,7 +40,7 @@ class QubitAllocationManager Array &get(String const &name); private: - QubitAllocationManager() = default; + AllocationManager() = default; NameToIndex name_to_index_; Mappings mappings_; diff --git a/src/Passes/Source/CMakeLists.txt b/src/Passes/Source/CMakeLists.txt index e97953f783..0fe06f4d51 100644 --- a/src/Passes/Source/CMakeLists.txt +++ b/src/Passes/Source/CMakeLists.txt @@ -1,11 +1,26 @@ cmake_minimum_required(VERSION 3.4.3) +# Creating Allocation Manager library +file(GLOB ALLOCATION_MGR_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/AllocationManager/*.cpp) +add_library(AllocationManager + SHARED + ${ALLOCATION_MGR_SOURCE}) + +target_include_directories( + AllocationManager + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/include" +) + +target_link_libraries(AllocationManager + 
"$<$:-undefined dynamic_lookup>") + # Creating the rules library -file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/*.cpp) +file(GLOB RULES_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/*.cpp) add_library(Rules SHARED - ${sources}) + ${RULES_SOURCE} ) target_include_directories( Rules @@ -13,6 +28,7 @@ target_include_directories( "${CMAKE_CURRENT_SOURCE_DIR}/include" ) +target_link_libraries(Rules AllocationManager) target_link_libraries(Rules "$<$:-undefined dynamic_lookup>") diff --git a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp index bb826121cd..e54d74561d 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp +++ b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp @@ -11,186 +11,6 @@ namespace microsoft { namespace quantum { -InstructionReplacementPass::InstructionReplacementPass() -{ - using namespace microsoft::quantum::patterns; - - // Shared pointer to be captured in the lambdas of the patterns - // Note that you cannot capture this as the reference is destroyed upon - // copy. 
Since PassInfoMixin requires copy, such a construct would break - auto alloc_manager = QubitAllocationManager::createNew(); - - // Pattern 0 - Find type - ReplacementRule rule0; - - auto get_element = - Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); - rule0.setPattern("cast"_cap = BitCast("getElement"_cap = get_element)); - rule0.setReplacer([alloc_manager](Builder &, Value *, Captures &cap, Replacements &) { - llvm::errs() << "Identified an access attempt" - << "\n"; - - auto type = cap["cast"]->getType(); - - // This rule only deals with access to arrays of opaque types - auto ptr_type = llvm::dyn_cast(type); - if (ptr_type == nullptr) - { - return false; - } - - auto array = cap["arrayName"]; - - llvm::errs() << *array->getType() << " of " << *type << " " << type->isPointerTy() << " " - << *type->getPointerElementType() << " " << type->isArrayTy() << "\n"; - return false; - }); - rules_.emplace_back(std::move(rule0)); - - // Pattern 1 - Get array index - - // auto get_element = - // Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); - auto cast_pattern = BitCast("getElement"_cap = get_element); - auto load_pattern = Load("cast"_cap = cast_pattern); - - // Rule 1 - ReplacementRule rule1; - rule1.setPattern(std::move(load_pattern)); - - // Replacement details - rule1.setReplacer( - [alloc_manager](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { - // Getting the type pointer - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) - { - return false; - } - - // Get the index and testing that it is a constant int - auto cst = llvm::dyn_cast(cap["index"]); - if (cst == nullptr) - { - // ... if not, we cannot perform the mapping. - return false; - } - - // Computing the index by getting the current index value and offseting by - // the offset at which the qubit array is allocated. 
- auto llvm_size = cst->getValue(); - auto offset = alloc_manager->getOffset(cap["arrayName"]->getName().str()); - - // Creating a new index APInt that is shifted by the offset of the allocation - auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); - - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); - - // Replacing the instruction with new instruction - replacements.push_back({llvm::dyn_cast(val), instr}); - - // Deleting the getelement and cast operations - replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); - replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); - - return true; - }); - rules_.emplace_back(std::move(rule1)); - - // Rule 2 - delete __quantum__rt__array_update_alias_count - ReplacementRule rule2; - auto alias_count = std::make_shared("__quantum__rt__array_update_alias_count"); - rule2.setPattern(alias_count); - rule2.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - rules_.emplace_back(std::move(rule2)); - - // Rule 3 - delete __quantum__rt__qubit_release_array - ReplacementRule rule3; - auto release_call = std::make_shared("__quantum__rt__qubit_release_array"); - rule3.setPattern(release_call); - rule3.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - rules_.emplace_back(std::move(rule3)); - - // Rule 4 - perform static allocation and delete __quantum__rt__qubit_allocate_array - ReplacementRule rule4; - auto allocate_call = 
Call("__quantum__rt__qubit_allocate_array", "size"_cap = _); - rule4.setPattern(std::move(allocate_call)); - - rule4.setReplacer( - [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { - auto cst = llvm::dyn_cast(cap["size"]); - if (cst == nullptr) - { - return false; - } - - auto llvm_size = cst->getValue(); - auto name = val->getName().str(); - alloc_manager->allocate(name, llvm_size.getZExtValue()); - - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - - rules_.emplace_back(std::move(rule4)); - - // Rule 5 - standard array allocation - ReplacementRule rule5; - auto allocate_array_call = - Call("__quantum__rt__array_create_1d", "elementSize"_cap = _, "size"_cap = _); - rule5.setPattern(std::move(allocate_array_call)); - - rule5.setReplacer( - [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { - auto cst = llvm::dyn_cast(cap["size"]); - if (cst == nullptr) - { - return false; - } - - auto llvm_size = cst->getValue(); - alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue(), true); - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - - rules_.emplace_back(std::move(rule5)); - - // Rule 6 - track stored values - - auto get_target_element = Call("__quantum__rt__array_get_element_ptr_1d", - "targetArrayName"_cap = _, "targetIndex"_cap = _); - auto get_value_element = Call("__quantum__rt__array_get_element_ptr_1d", "valueArrayName"_cap = _, - "targetValue"_cap = _); - auto target = BitCast("target"_cap = get_target_element); - auto value = BitCast("value"_cap = get_element); - - auto store_pattern = Store(target, value); - - ReplacementRule rule6; - rule6.setPattern(std::move(store_pattern)); - - rule6.setReplacer([alloc_manager](Builder &, Value *, Captures &, Replacements &) { - llvm::errs() << "Found store pattern" - << "\n"; - return false; - }); - rules_.emplace_back(std::move(rule6)); -} - llvm::PreservedAnalyses 
InstructionReplacementPass::run(llvm::Function &function, llvm::FunctionAnalysisManager & /*fam*/) { @@ -202,7 +22,7 @@ llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function { for (auto &instr : basic_block) { - matchAndReplace(&instr); + rule_set_.matchAndReplace(&instr, replacements_); } } @@ -246,25 +66,5 @@ bool InstructionReplacementPass::isRequired() return true; } -bool InstructionReplacementPass::matchAndReplace(Instruction *value) -{ - Captures captures; - for (auto const &rule : rules_) - { - // Checking if the rule is matched and keep track of captured nodes - if (rule.match(value, captures)) - { - - // If it is matched, we attempt to replace it - llvm::IRBuilder<> builder{value}; - if (rule.replace(builder, value, captures, replacements_)) - { - return true; - } - } - } - return false; -} - } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp index 8f491bfd0b..17706c23d1 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp +++ b/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp @@ -3,9 +3,7 @@ // Licensed under the MIT License. 
#include "Llvm/Llvm.hpp" -#include "Passes/InstructionReplacement/QubitAllocationManager.hpp" -#include "Rules/OperandPrototype.hpp" -#include "Rules/ReplacementRule.hpp" +#include "Rules/RuleSet.hpp" #include @@ -15,17 +13,16 @@ namespace quantum { class InstructionReplacementPass : public llvm::PassInfoMixin { public: - using Captures = OperandPrototype::Captures; - using Replacements = ReplacementRule::Replacements; - using Instruction = llvm::Instruction; - using Rules = std::vector; - using Value = llvm::Value; - using Builder = ReplacementRule::Builder; - using QubitAllocationManagerPtr = QubitAllocationManager::QubitAllocationManagerPtr; + using Replacements = ReplacementRule::Replacements; + using Instruction = llvm::Instruction; + using Rules = std::vector; + using Value = llvm::Value; + using Builder = ReplacementRule::Builder; + using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; /// Constructors and destructors /// @{ - InstructionReplacementPass(); + InstructionReplacementPass() = default; InstructionReplacementPass(InstructionReplacementPass const &) = delete; InstructionReplacementPass(InstructionReplacementPass &&) = default; ~InstructionReplacementPass() = default; @@ -43,10 +40,8 @@ class InstructionReplacementPass : public llvm::PassInfoMixin + +namespace microsoft { +namespace quantum { + +RuleSet::RuleSet() +{ + + using namespace microsoft::quantum::patterns; + + // Shared pointer to be captured in the lambdas of the patterns + // Note that you cannot capture this as the reference is destroyed upon + // copy. 
Since PassInfoMixin requires copy, such a construct would break + auto alloc_manager = AllocationManager::createNew(); + + // Pattern 0 - Find type + ReplacementRule rule0; + + auto get_element = + Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); + rule0.setPattern("cast"_cap = BitCast("getElement"_cap = get_element)); + rule0.setReplacer([alloc_manager](Builder &, Value *, Captures &cap, Replacements &) { + llvm::errs() << "Identified an access attempt" + << "\n"; + + auto type = cap["cast"]->getType(); + + // This rule only deals with access to arrays of opaque types + auto ptr_type = llvm::dyn_cast(type); + if (ptr_type == nullptr) + { + return false; + } + + auto array = cap["arrayName"]; + + llvm::errs() << *array->getType() << " of " << *type << " " << type->isPointerTy() << " " + << *type->getPointerElementType() << " " << type->isArrayTy() << "\n"; + return false; + }); + rules_.emplace_back(std::move(rule0)); + + // Pattern 1 - Get array index + + // auto get_element = + // Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); + auto cast_pattern = BitCast("getElement"_cap = get_element); + auto load_pattern = Load("cast"_cap = cast_pattern); + + // Rule 1 + ReplacementRule rule1; + rule1.setPattern(std::move(load_pattern)); + + // Replacement details + rule1.setReplacer( + [alloc_manager](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + // Get the index and testing that it is a constant int + auto cst = llvm::dyn_cast(cap["index"]); + if (cst == nullptr) + { + // ... if not, we cannot perform the mapping. + return false; + } + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. 
+ auto llvm_size = cst->getValue(); + auto offset = alloc_manager->getOffset(cap["arrayName"]->getName().str()); + + // Creating a new index APInt that is shifted by the offset of the allocation + auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the instruction with new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + // Deleting the getelement and cast operations + replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); + replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); + + return true; + }); + rules_.emplace_back(std::move(rule1)); + + // Rule 2 - delete __quantum__rt__array_update_alias_count + ReplacementRule rule2; + auto alias_count = std::make_shared("__quantum__rt__array_update_alias_count"); + rule2.setPattern(alias_count); + rule2.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + rules_.emplace_back(std::move(rule2)); + + // Rule 3 - delete __quantum__rt__qubit_release_array + ReplacementRule rule3; + auto release_call = std::make_shared("__quantum__rt__qubit_release_array"); + rule3.setPattern(release_call); + rule3.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + rules_.emplace_back(std::move(rule3)); + + // Rule 4 - perform static allocation and delete __quantum__rt__qubit_allocate_array + ReplacementRule rule4; + auto allocate_call = 
Call("__quantum__rt__qubit_allocate_array", "size"_cap = _); + rule4.setPattern(std::move(allocate_call)); + + rule4.setReplacer( + [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { + auto cst = llvm::dyn_cast(cap["size"]); + if (cst == nullptr) + { + return false; + } + + auto llvm_size = cst->getValue(); + auto name = val->getName().str(); + alloc_manager->allocate(name, llvm_size.getZExtValue()); + + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + + rules_.emplace_back(std::move(rule4)); + + // Rule 5 - standard array allocation + ReplacementRule rule5; + auto allocate_array_call = + Call("__quantum__rt__array_create_1d", "elementSize"_cap = _, "size"_cap = _); + rule5.setPattern(std::move(allocate_array_call)); + + rule5.setReplacer( + [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { + auto cst = llvm::dyn_cast(cap["size"]); + if (cst == nullptr) + { + return false; + } + + auto llvm_size = cst->getValue(); + alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue(), true); + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + + rules_.emplace_back(std::move(rule5)); + + // Rule 6 - track stored values + + auto get_target_element = Call("__quantum__rt__array_get_element_ptr_1d", + "targetArrayName"_cap = _, "targetIndex"_cap = _); + auto get_value_element = Call("__quantum__rt__array_get_element_ptr_1d", "valueArrayName"_cap = _, + "targetValue"_cap = _); + auto target = BitCast("target"_cap = get_target_element); + auto value = BitCast("value"_cap = get_element); + + auto store_pattern = Store(target, value); + + ReplacementRule rule6; + rule6.setPattern(std::move(store_pattern)); + + rule6.setReplacer([alloc_manager](Builder &, Value *, Captures &, Replacements &) { + llvm::errs() << "Found store pattern" + << "\n"; + return false; + }); + rules_.emplace_back(std::move(rule6)); +} + +bool 
RuleSet::matchAndReplace(Instruction *value, Replacements &replacements) +{ + Captures captures; + for (auto const &rule : rules_) + { + // Checking if the rule is matched and keep track of captured nodes + if (rule.match(value, captures)) + { + + // If it is matched, we attempt to replace it + llvm::IRBuilder<> builder{value}; + if (rule.replace(builder, value, captures, replacements)) + { + return true; + } + } + } + return false; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/RuleSet.hpp b/src/Passes/Source/Rules/RuleSet.hpp new file mode 100644 index 0000000000..7eeec28de6 --- /dev/null +++ b/src/Passes/Source/Rules/RuleSet.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include "AllocationManager/AllocationManager.hpp" +#include "Llvm/Llvm.hpp" +#include "Rules/OperandPrototype.hpp" +#include "Rules/ReplacementRule.hpp" + +#include + +namespace microsoft { +namespace quantum { + +class RuleSet +{ +public: + using Rules = std::vector; + using Replacements = ReplacementRule::Replacements; + using Captures = OperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using Builder = ReplacementRule::Builder; + using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; + + /// @{ + RuleSet(); + RuleSet(RuleSet const &) = delete; + RuleSet(RuleSet &&) = default; + ~RuleSet() = default; + /// @} + + /// Operators + /// @{ + RuleSet &operator=(RuleSet const &) = delete; + RuleSet &operator=(RuleSet &&) = default; + /// @} + + bool matchAndReplace(Instruction *value, Replacements &replacements); + +private: + Rules rules_; ///< Rules that describes QIR mappings +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/examples/ClassicalIrCommandline/Makefile b/src/Passes/examples/ClassicalIrCommandline/Makefile index 2deedec1bc..35f0e05627 100644 --- a/src/Passes/examples/ClassicalIrCommandline/Makefile +++ b/src/Passes/examples/ClassicalIrCommandline/Makefile 
@@ -6,7 +6,7 @@ emit-llvm-bc: debug-ng-pass-mac: emit-llvm-bc - opt -load-pass-plugin ../../Debug/libOpsCounter.dylib --passes="print" -disable-output classical-program.bc + opt -load-pass-plugin ../../Debug/Source/Passes/libOpsCounter.dylib --passes="print" -disable-output classical-program.bc clean: diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index b6312fa92a..feb45e06ce 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -1,14 +1,14 @@ run-expand: build-qaa build-esa analysis-example.ll - opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib \ - -load-pass-plugin ../../Debug/libs/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ + -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll run: build-qaa analysis-example.ll - opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll run-replace: build-ir analysis-example.ll # opt -loop-unroll -unroll-count=3 -unroll-allow-partial - opt -load-pass-plugin ../../Debug/libs/libInstructionReplacement.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-rotate,loop-unroll,instruction-replacement" -S analysis-example.ll > test.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libInstructionReplacement.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-rotate,loop-unroll,instruction-replacement" -S analysis-example.ll > test.ll opt --passes="inline" -S test.ll | opt -O1 -S From 83cb962a04bbb9acfa70ec48d9b9fc9106d7d32c Mon Sep 17 00:00:00 2001 
From: "Troels F. Roennow" Date: Mon, 9 Aug 2021 15:47:55 +0200 Subject: [PATCH 064/106] Renaming pass to more suitable name --- .../LibTransformationRule.cpp} | 10 +++++----- .../TransformationRule.cpp} | 8 ++++---- .../TransformationRule.hpp} | 14 +++++++------- 3 files changed, 16 insertions(+), 16 deletions(-) rename src/Passes/Source/Passes/{InstructionReplacement/LibInstructionReplacement.cpp => TransformationRule/LibTransformationRule.cpp} (68%) rename src/Passes/Source/Passes/{InstructionReplacement/InstructionReplacement.cpp => TransformationRule/TransformationRule.cpp} (83%) rename src/Passes/Source/Passes/{InstructionReplacement/InstructionReplacement.hpp => TransformationRule/TransformationRule.hpp} (64%) diff --git a/src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp similarity index 68% rename from src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp rename to src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp index 77c2aa08ec..f751c2729e 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/LibInstructionReplacement.cpp +++ b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp @@ -2,25 +2,25 @@ // Licensed under the MIT License. 
#include "Llvm/Llvm.hpp" -#include "Passes/InstructionReplacement/InstructionReplacement.hpp" +#include "Passes/TransformationRule/TransformationRule.hpp" #include #include namespace { -llvm::PassPluginLibraryInfo getInstructionReplacementPluginInfo() +llvm::PassPluginLibraryInfo getTransformationRulePluginInfo() { using namespace microsoft::quantum; using namespace llvm; return { - LLVM_PLUGIN_API_VERSION, "InstructionReplacement", LLVM_VERSION_STRING, [](PassBuilder &pb) { + LLVM_PLUGIN_API_VERSION, "TransformationRule", LLVM_VERSION_STRING, [](PassBuilder &pb) { // Registering the pass pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { if (name == "instruction-replacement") { - fpm.addPass(InstructionReplacementPass()); + fpm.addPass(TransformationRulePass()); return true; } @@ -33,5 +33,5 @@ llvm::PassPluginLibraryInfo getInstructionReplacementPluginInfo() // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getInstructionReplacementPluginInfo(); + return getTransformationRulePluginInfo(); } diff --git a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp similarity index 83% rename from src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp rename to src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp index e54d74561d..3673a3f2cb 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.cpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Passes/InstructionReplacement/InstructionReplacement.hpp" +#include "Passes/TransformationRule/TransformationRule.hpp" #include "Llvm/Llvm.hpp" @@ -11,8 +11,8 @@ namespace microsoft { namespace quantum { -llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function, - llvm::FunctionAnalysisManager & /*fam*/) +llvm::PreservedAnalyses TransformationRulePass::run(llvm::Function &function, + llvm::FunctionAnalysisManager & /*fam*/) { replacements_.clear(); @@ -61,7 +61,7 @@ llvm::PreservedAnalyses InstructionReplacementPass::run(llvm::Function &function return llvm::PreservedAnalyses::none(); } -bool InstructionReplacementPass::isRequired() +bool TransformationRulePass::isRequired() { return true; } diff --git a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp similarity index 64% rename from src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp rename to src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp index 17706c23d1..969820899b 100644 --- a/src/Passes/Source/Passes/InstructionReplacement/InstructionReplacement.hpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp @@ -10,7 +10,7 @@ namespace microsoft { namespace quantum { -class InstructionReplacementPass : public llvm::PassInfoMixin +class TransformationRulePass : public llvm::PassInfoMixin { public: using Replacements = ReplacementRule::Replacements; @@ -22,16 +22,16 @@ class InstructionReplacementPass : public llvm::PassInfoMixin Date: Mon, 9 Aug 2021 20:34:22 +0200 Subject: [PATCH 065/106] Updating Python tasks --- .../Passes/TransformationRule/LibTransformationRule.cpp | 2 +- src/Passes/examples/QubitAllocationAnalysis/Makefile | 4 ++-- src/Passes/site-packages/TasksCI/cli.py | 2 +- src/Passes/site-packages/TasksCI/formatting.py | 2 +- src/Passes/site-packages/TasksCI/linting.py | 4 ++-- 5 files changed, 7 insertions(+), 7 
deletions(-) diff --git a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp index f751c2729e..0841f84081 100644 --- a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp @@ -18,7 +18,7 @@ llvm::PassPluginLibraryInfo getTransformationRulePluginInfo() // Registering the pass pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { - if (name == "instruction-replacement") + if (name == "transformation-rule") { fpm.addPass(TransformationRulePass()); return true; diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index feb45e06ce..11913ff58b 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -8,7 +8,7 @@ run: build-qaa analysis-example.ll run-replace: build-ir analysis-example.ll # opt -loop-unroll -unroll-count=3 -unroll-allow-partial - opt -load-pass-plugin ../../Debug/Source/Passes/libInstructionReplacement.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-rotate,loop-unroll,instruction-replacement" -S analysis-example.ll > test.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-rotate,loop-unroll,transformation-rule" -S analysis-example.ll > test.ll opt --passes="inline" -S test.ll | opt -O1 -S @@ -22,7 +22,7 @@ build-esa: build-prepare pushd ../../Debug && make ExpandStaticAllocation && popd || popd build-ir: build-prepare - pushd ../../Debug && make InstructionReplacement && popd || popd + pushd ../../Debug && make TransformationRule && popd || popd analysis-example.ll: diff --git a/src/Passes/site-packages/TasksCI/cli.py b/src/Passes/site-packages/TasksCI/cli.py index c9bc93c524..748f0cd56d 100644 --- 
a/src/Passes/site-packages/TasksCI/cli.py +++ b/src/Passes/site-packages/TasksCI/cli.py @@ -156,7 +156,7 @@ def create_pass(name: str, template: OptionalStr) -> None: """ # Checking whether the target already exists - target_dir = os.path.join(SOURCE_DIR, "libs", name) + target_dir = os.path.join(SOURCE_DIR, "Source", name) if os.path.exists(target_dir): logger.error("Pass '{}' already exists".format(name)) exit(-1) diff --git a/src/Passes/site-packages/TasksCI/formatting.py b/src/Passes/site-packages/TasksCI/formatting.py index 1d2bdfa4f0..af080f2ca3 100644 --- a/src/Passes/site-packages/TasksCI/formatting.py +++ b/src/Passes/site-packages/TasksCI/formatting.py @@ -104,7 +104,7 @@ def enforce_formatting(filename: str, contents: str, cursor: int, fix_issues: bo SOURCE_PIPELINES = [ { "name": "C++ Main", - "src": path.join(PROJECT_ROOT, "libs"), + "src": path.join(PROJECT_ROOT, "Source"), "pipelines": { "hpp": [ diff --git a/src/Passes/site-packages/TasksCI/linting.py b/src/Passes/site-packages/TasksCI/linting.py index b8313bcb4b..61eb2d98dd 100644 --- a/src/Passes/site-packages/TasksCI/linting.py +++ b/src/Passes/site-packages/TasksCI/linting.py @@ -54,7 +54,7 @@ def run_clang_tidy(build_dir: str, filename: str, fix_issues: bool = False) -> b cmd = [clang_tidy_binary] output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) - cmd.append('-header-filter=".*\\/(Passes)\\/(libs)\\/.*"') + cmd.append('-header-filter=".*\\/(Passes)\\/(Source)\\/.*"') cmd.append('-p=' + build_dir) cmd.append('-export-fixes={}'.format(output_file)) cmd.append('--use-color') @@ -104,7 +104,7 @@ def main_cpp(fix_issues: bool) -> bool: logger.info("Linting") build_dir = os.path.join(PROJECT_ROOT, "Debug") - source_dir = os.path.join(PROJECT_ROOT, "libs") + source_dir = os.path.join(PROJECT_ROOT, "Source") generator = None extensions = ["cpp"] From 788888d901ca67a784a382df6be2e0adb458f533 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 10 Aug 2021 09:00:05 +0200 Subject: [PATCH 066/106] Addingn teleportation example --- src/Passes/Source/Rules/RuleSet.cpp | 100 +- .../ConstSizeArray/ConstSizeArray.csproj | 6 +- .../ConstSizeArray/ConstSizeArray.qs | 52 +- .../ConstSizeArray/qir/ConstSizeArray.ll | 4945 +++++++++++------ .../analysis-example.ll | 269 +- .../examples/QubitAllocationAnalysis/test.ll | 220 +- 6 files changed, 3759 insertions(+), 1833 deletions(-) diff --git a/src/Passes/Source/Rules/RuleSet.cpp b/src/Passes/Source/Rules/RuleSet.cpp index 9d77ee3984..1120ac529e 100644 --- a/src/Passes/Source/Rules/RuleSet.cpp +++ b/src/Passes/Source/Rules/RuleSet.cpp @@ -4,8 +4,8 @@ #include "Llvm/Llvm.hpp" #include "Rules/ReplacementRule.hpp" +#include #include - namespace microsoft { namespace quantum { @@ -54,11 +54,11 @@ RuleSet::RuleSet() auto load_pattern = Load("cast"_cap = cast_pattern); // Rule 1 - ReplacementRule rule1; - rule1.setPattern(std::move(load_pattern)); + ReplacementRule rule1a; + rule1a.setPattern(std::move(load_pattern)); // Replacement details - rule1.setReplacer( + rule1a.setReplacer( [alloc_manager](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { // Getting the type pointer auto ptr_type = llvm::dyn_cast(val->getType()); @@ -101,34 +101,74 @@ RuleSet::RuleSet() return true; }); - rules_.emplace_back(std::move(rule1)); + rules_.emplace_back(std::move(rule1a)); + + ReplacementRule rule1b; + rule1b.setPattern(Call("__quantum__rt__qubit_allocate")); + + // Replacement details + rule1b.setReplacer([](Builder &, Value *, Captures &, Replacements &) { + // std::cout << "Found single allocation" << std::endl; + return false; + }); + rules_.emplace_back(std::move(rule1b)); // Rule 2 - delete __quantum__rt__array_update_alias_count - ReplacementRule rule2; - auto alias_count = std::make_shared("__quantum__rt__array_update_alias_count"); - rule2.setPattern(alias_count); - rule2.setReplacer([](Builder &, Value *val, Captures &, Replacements 
&replacements) { + ReplacementRule rule2a; + auto alias_count1 = std::make_shared("__quantum__rt__array_update_alias_count"); + rule2a.setPattern(alias_count1); + rule2a.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { replacements.push_back({llvm::dyn_cast(val), nullptr}); return true; }); - rules_.emplace_back(std::move(rule2)); + rules_.emplace_back(std::move(rule2a)); - // Rule 3 - delete __quantum__rt__qubit_release_array - ReplacementRule rule3; - auto release_call = std::make_shared("__quantum__rt__qubit_release_array"); - rule3.setPattern(release_call); - rule3.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + ReplacementRule rule2b; + auto alias_count2 = std::make_shared("__quantum__rt__string_update_alias_count"); + rule2b.setPattern(alias_count2); + rule2b.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { replacements.push_back({llvm::dyn_cast(val), nullptr}); return true; }); - rules_.emplace_back(std::move(rule3)); + rules_.emplace_back(std::move(rule2b)); + + // Rule 3 + ReplacementRule rule3a; + auto reference_count1 = + std::make_shared("__quantum__rt__array_update_reference_count"); + rule3a.setPattern(reference_count1); + rule3a.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + rules_.emplace_back(std::move(rule3a)); - // Rule 4 - perform static allocation and delete __quantum__rt__qubit_allocate_array + ReplacementRule rule3b; + auto reference_count2 = + std::make_shared("__quantum__rt__string_update_reference_count"); + rule3b.setPattern(reference_count2); + rule3b.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + rules_.emplace_back(std::move(rule3b)); + + // Rule 4 - delete __quantum__rt__qubit_release_array ReplacementRule rule4; + 
auto release_call = std::make_shared("__quantum__rt__qubit_release_array"); + rule4.setPattern(release_call); + rule4.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }); + rules_.emplace_back(std::move(rule4)); + + // Rule 6 - perform static allocation and delete __quantum__rt__qubit_allocate_array + ReplacementRule rule6; auto allocate_call = Call("__quantum__rt__qubit_allocate_array", "size"_cap = _); - rule4.setPattern(std::move(allocate_call)); + rule6.setPattern(std::move(allocate_call)); - rule4.setReplacer( + rule6.setReplacer( [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { auto cst = llvm::dyn_cast(cap["size"]); if (cst == nullptr) @@ -144,15 +184,15 @@ RuleSet::RuleSet() return true; }); - rules_.emplace_back(std::move(rule4)); + rules_.emplace_back(std::move(rule6)); - // Rule 5 - standard array allocation - ReplacementRule rule5; + // Rule 8 - standard array allocation + ReplacementRule rule8; auto allocate_array_call = Call("__quantum__rt__array_create_1d", "elementSize"_cap = _, "size"_cap = _); - rule5.setPattern(std::move(allocate_array_call)); + rule8.setPattern(std::move(allocate_array_call)); - rule5.setReplacer( + rule8.setReplacer( [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { auto cst = llvm::dyn_cast(cap["size"]); if (cst == nullptr) @@ -166,9 +206,9 @@ RuleSet::RuleSet() return true; }); - rules_.emplace_back(std::move(rule5)); + rules_.emplace_back(std::move(rule8)); - // Rule 6 - track stored values + // Rule 10 - track stored values auto get_target_element = Call("__quantum__rt__array_get_element_ptr_1d", "targetArrayName"_cap = _, "targetIndex"_cap = _); @@ -179,15 +219,15 @@ RuleSet::RuleSet() auto store_pattern = Store(target, value); - ReplacementRule rule6; - rule6.setPattern(std::move(store_pattern)); + ReplacementRule rule10; + 
rule10.setPattern(std::move(store_pattern)); - rule6.setReplacer([alloc_manager](Builder &, Value *, Captures &, Replacements &) { + rule10.setReplacer([alloc_manager](Builder &, Value *, Captures &, Replacements &) { llvm::errs() << "Found store pattern" << "\n"; return false; }); - rules_.emplace_back(std::move(rule6)); + rules_.emplace_back(std::move(rule10)); } bool RuleSet::matchAndReplace(Instruction *value, Replacements &replacements) diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj index eeab572589..ab96e16e33 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj @@ -1,4 +1,4 @@ - + Exe @@ -6,4 +6,8 @@ true + + + + diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index 96330ce7a0..9d4e8f0927 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,12 +1,52 @@ -namespace Feasibility -{ +namespace TeleportChain { open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Measurement; + open Microsoft.Quantum.Preparation; + + operation PrepareEntangledPair(left : Qubit, right : Qubit) : Unit is Adj + Ctl { + H(left); + CNOT(left, right); + } + + operation ApplyCorrection(src : Qubit, intermediary : Qubit, dest : Qubit) : Unit { + if (MResetZ(src) == One) { Z(dest); } + if (MResetZ(intermediary) == One) { X(dest); } + } + + operation TeleportQubitUsingPresharedEntanglement(src : Qubit, intermediary : Qubit, dest : Qubit) : Unit { + Adjoint PrepareEntangledPair(src, intermediary); + ApplyCorrection(src, 
intermediary, dest); + } + + operation TeleportQubit(src : Qubit, dest : Qubit) : Unit { + use intermediary = Qubit(); + PrepareEntangledPair(intermediary, dest); + TeleportQubitUsingPresharedEntanglement(src, intermediary, dest); + } + + operation DemonstrateEntanglementSwapping() : (Result, Result) { + use (reference, src, intermediary, dest) = (Qubit(), Qubit(), Qubit(), Qubit()); + PrepareEntangledPair(reference, src); + TeleportQubit(src, dest); + return (MResetZ(reference), MResetZ(dest)); + } @EntryPoint() - operation QubitMapping() : Unit { - use qs = Qubit[3]; - for q in 8..10 { - X(qs[q - 8]); + operation DemonstrateTeleportationUsingPresharedEntanglement() : (Result, Result) { + let nPairs = 2; + use (leftMessage, rightMessage, leftPreshared, rightPreshared) = (Qubit(), Qubit(), Qubit[nPairs], Qubit[nPairs]); + PrepareEntangledPair(leftMessage, rightMessage); + for i in 0..nPairs-1 { + PrepareEntangledPair(leftPreshared[i], rightPreshared[i]); } + + TeleportQubitUsingPresharedEntanglement(rightMessage, leftPreshared[0], rightPreshared[0]); + for i in 1..nPairs-1 { + TeleportQubitUsingPresharedEntanglement(rightPreshared[i-1], leftPreshared[i], rightPreshared[i]); + } + + return (MResetZ(leftMessage), MResetZ(rightPreshared[nPairs-1])); } } \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll index 6e6d17129b..a646404c8b 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll @@ -1,2080 +1,3503 @@ %Range = type { i64, i64, i64 } %Tuple = type opaque -%Array = type opaque %Qubit = type opaque -%String = type opaque -%Callable = type opaque %Result = type opaque +%Array = type opaque +%Callable = type opaque +%String = type opaque @PauliI = internal constant i2 0 @PauliX = internal 
constant i2 1 @PauliY = internal constant i2 -1 @PauliZ = internal constant i2 -2 @EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -@PartialApplication__1 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@MemoryManagement__1 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] -@PartialApplication__2 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__3 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] -@MemoryManagement__2 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] -@PartialApplication__4 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] -@PartialApplication__5 = 
internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] -@PartialApplication__6 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] -@PartialApplication__7 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__8 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__9 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] -@PartialApplication__10 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] -@PartialApplication__11 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] -@PartialApplication__12 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] - -define internal void @Feasibility__QubitMapping__body() { -entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 1) +@0 = internal constant [18 x i8] c"Unsupported input\00" +@1 = internal constant [18 x i8] c"Unsupported input\00" +@Microsoft__Quantum__Intrinsic__CNOT = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__H = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void 
(%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Rx = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Ry = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Rz = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__S = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void 
(%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__T = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__X = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Z = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper] +@2 = internal constant [2 x i8] c"(\00" +@3 = internal constant [3 x i8] c", \00" +@4 = internal constant [2 x i8] c")\00" + +define internal void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) { +entry: + %0 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call 
void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %3 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) + %4 = call %Result* @__quantum__rt__result_get_one() + %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) + call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) + br i1 %5, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + ret void +} + +define internal %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x(%Qubit* %qubit) + ret void +} + +define internal { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() { +entry: + %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) + %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) + call 
void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) br label %header__1 header__1: ; preds = %exiting__1, %entry - %q = phi i64 [ 8, %entry ], [ %4, %exiting__1 ] - %0 = icmp sle i64 %q, 10 + %i = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %0 = icmp sle i64 %i, 1 br i1 %0, label %body__1, label %exit__1 body__1: ; preds = %header__1 - %1 = sub i64 %q, 8 - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 %1) - %3 = bitcast i8* %2 to %Qubit** - %qubit = load %Qubit*, %Qubit** %3, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 %i) + %2 = bitcast i8* %1 to %Qubit** + %3 = load %Qubit*, %Qubit** %2, align 8 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %i) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @TeleportChain__PrepareEntangledPair__body(%Qubit* %3, %Qubit* %6) br label %exiting__1 exiting__1: ; preds = %body__1 - %4 = add i64 %q, 1 + %7 = add i64 %i, 1 br label %header__1 exit__1: ; preds = %header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qs) - ret void + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %10, %Qubit* %13) + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %i__1 = phi i64 [ 1, %exit__1 ], [ %25, %exiting__2 ] + %14 = icmp sle i64 %i__1, 1 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 
+ %15 = sub i64 %i__1, 1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %15) + %17 = bitcast i8* %16 to %Qubit** + %18 = load %Qubit*, %Qubit** %17, align 8 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 %i__1) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %i__1) + %23 = bitcast i8* %22 to %Qubit** + %24 = load %Qubit*, %Qubit** %23, align 8 + call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %18, %Qubit* %21, %Qubit* %24) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %i__1, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %27 = bitcast %Tuple* %26 to { %Result*, %Result* }* + %28 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %27, i32 0, i32 1 + %30 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %32 = bitcast i8* %31 to %Qubit** + %33 = load %Qubit*, %Qubit** %32, align 8 + %34 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %33) + store %Result* %30, %Result** %28, align 8 + store %Result* %34, %Result** %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) + call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) + call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) + call void 
@__quantum__rt__qubit_release_array(%Array* %rightPreshared) + ret { %Result*, %Result* }* %27 } declare %Qubit* @__quantum__rt__qubit_allocate() declare %Array* @__quantum__rt__qubit_allocate_array(i64) +declare void @__quantum__rt__qubit_release(%Qubit*) + declare void @__quantum__rt__qubit_release_array(%Array*) declare void @__quantum__rt__array_update_alias_count(%Array*, i32) -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) +define internal void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) { +entry: + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) + ret void +} -declare void @__quantum__qis__x__body(%Qubit*) +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) -define internal { %String* }* @Microsoft__Quantum__Diagnostics__EnableTestingViaName__body(%String* %__Item1__) { +define internal void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) { entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %__Item1__, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) - ret { %String* }* %1 + call void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) + call void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) + ret void } declare %Tuple* @__quantum__rt__tuple_create(i64) -declare void @__quantum__rt__string_update_reference_count(%String*, i32) +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h(%Qubit* %qubit) + ret void +} -define internal { 
%String* }* @Microsoft__Quantum__Diagnostics__Test__body(%String* %ExecutionTarget) { +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %ExecutionTarget, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %ExecutionTarget, i32 1) - ret { %String* }* %1 + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) + ret void } -define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +define internal void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) { entry: - call void @__quantum__qis__x__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) ret void } -define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { entry: - call void @__quantum__qis__x__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) ret void } -define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +define internal void @TeleportChain__PrepareEntangledPair__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + %1 = 
getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %left = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %right = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %left) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %left, %Qubit** %5, align 8 + store %Qubit* %right, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) ret void } -declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__h(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) + %4 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %6, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %10, align 8 + store %Qubit* %qubit, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %7, { %Array*, %Qubit* }* %9) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + br label %continue__1 -define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* 
%ctls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %4 = icmp eq i64 %3, 0 + br i1 %4, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %6 = icmp eq i64 %5, 1 + br i1 %6, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %9, %Qubit* %control, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %test1__1 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__CNOT, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %10) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %12 = bitcast %Tuple* %11 to { %Array*, { %Qubit*, %Qubit* }* }* + %13 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %12, i32 0, i32 1 + call void 
@__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %16 = bitcast %Tuple* %15 to { %Qubit*, %Qubit* }* + %17 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %16, i32 0, i32 1 + store %Qubit* %control, %Qubit** %17, align 8 + store %Qubit* %target, %Qubit** %18, align 8 + store %Array* %ctls, %Array** %13, align 8 + store { %Qubit*, %Qubit* }* %16, { %Qubit*, %Qubit* }** %14, align 8 + call void @Microsoft__Quantum__Intrinsic___c1b23cd4538f4bf9ab69d9c3e574aa70___QsRef23__ApplyWithLessControlsA____body(%Callable* %10, { %Array*, { %Qubit*, %Qubit* }* }* %12) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +define internal void @TeleportChain__PrepareEntangledPair__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %left = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, 
i32 1 + %right = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %left, %Qubit** %5, align 8 + store %Qubit* %right, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %left) call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) ret void } -define internal %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { entry: - ret %Tuple* null + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void 
@Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void } -define internal { %String* }* @Microsoft__Quantum__Core__Deprecated__body(%String* %NewName) { +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %NewName, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %NewName, i32 1) - ret { %String* }* %1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void } -define internal %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +define internal double @Microsoft__Quantum__Math__PI__body() { entry: - ret %Tuple* null + ret double 0x400921FB54442D18 } -define internal %Tuple* @Microsoft__Quantum__Core__Inline__body() { +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledX____body(%Qubit* %control, %Qubit* %target) { entry: - ret %Tuple* null + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) + ret void } -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) + +define internal void 
@Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledX____adj(%Qubit* %control, %Qubit* %target) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) ret void } -declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledZ____body(%Qubit* %control, %Qubit* %target) { +entry: + call void @__quantum__qis__cz(%Qubit* %control, %Qubit* %target) + ret void +} -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) +declare void @__quantum__qis__cz(%Qubit*, %Qubit*) -declare void @__quantum__qis__applyconditionallyintrinsic__body(%Array*, %Array*, %Callable*, %Callable*) - -define internal void 
@Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* 
%measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* 
%onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) +define internal void 
@Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledZ____adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @__quantum__qis__cz(%Qubit* %control, %Qubit* %target) ret void } -declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____body(double %theta) { +entry: + ret void +} -declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____adj(double %theta) { +entry: + ret void +} -declare void @__quantum__rt__callable_make_adjoint(%Callable*) +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctl(%Array* %controls, double %theta) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %1 = icmp sgt i64 %0, 0 + br i1 %1, label %then0__1, label %continue__1 -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) +then0__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = sub i64 %0, 1 + %5 = load %Range, %Range* @EmptyRange, align 4 + %6 = insertvalue %Range %5, i64 1, 0 + %7 = insertvalue %Range %6, i64 1, 1 + %8 = insertvalue %Range %7, i64 %4, 2 + %rest = call %Array* @__quantum__rt__array_slice_1d(%Array* %controls, %Range %8, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %rest, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { double, %Qubit* }* + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { double, %Qubit* }, { 
double, %Qubit* }* %10, i32 0, i32 1 + store double %theta, double* %11, align 8 + store %Qubit* %qubit, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__R1__ctl(%Array* %rest, { double, %Qubit* }* %10) + call void @__quantum__rt__array_update_alias_count(%Array* %rest, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %rest, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + ret void +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) + +define internal void @Microsoft__Quantum__Intrinsic__R1__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + %8 = load i2, i2* @PauliZ, align 1 + store i2 %8, i2* %5, align 1 + store double %theta, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, 
%Qubit* }* %4) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i2, double, %Qubit* }* + %11 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 1 + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 2 + %14 = load i2, i2* @PauliI, align 1 + %15 = fneg double %theta + store i2 %14, i2* %11, align 1 + store double %15, double* %12, align 8 + store %Qubit* %qubit, %Qubit** %13, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %10) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctladj(%Array* %controls, double %theta) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %1 = icmp sgt i64 %0, 0 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %__qsVar0__qubit__ = load %Qubit*, %Qubit** %3, align 8 + %4 = sub i64 %0, 1 + %5 = load %Range, %Range* @EmptyRange, align 4 + %6 = insertvalue %Range %5, i64 1, 0 + %7 = insertvalue %Range %6, i64 1, 1 + %8 = insertvalue %Range %7, i64 %4, 2 + %__qsVar1__rest__ = call %Array* @__quantum__rt__array_slice_1d(%Array* 
%controls, %Range %8, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__rest__, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { double, %Qubit* }* + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 + store double %theta, double* %11, align 8 + store %Qubit* %__qsVar0__qubit__, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__R1__ctladj(%Array* %__qsVar1__rest__, { double, %Qubit* }* %10) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__rest__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__rest__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, 
%Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + %8 = load i2, i2* @PauliI, align 1 + %9 = fneg double %theta + store i2 %8, i2* %5, align 1 + store double %9, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i2, double, %Qubit* }* + %12 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 2 + %15 = load i2, i2* @PauliZ, align 1 + store i2 %15, i2* %12, align 1 + store double %theta, double* %13, align 8 + store %Qubit* %qubit, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %11) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledH____body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__h(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledH____adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h(%Qubit* %qubit) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledX____body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__x(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledX____adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledZ____body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__z(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledZ____adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %from, i2 %to) { +entry: + %0 = icmp eq i2 %from, %to + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + br label %continue__1 -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +test1__1: ; preds = %entry + %1 = load i2, i2* @PauliZ, align 1 + %2 = icmp eq i2 %from, %1 + br i1 %2, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %test1__1 + %3 = load i2, i2* @PauliX, align 1 + %4 = icmp eq i2 %to, %3 + br label 
%condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %test1__1 + %5 = phi i1 [ %4, %condTrue__1 ], [ %2, %test1__1 ] + %6 = xor i1 %5, true + br i1 %6, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %condContinue__1 + %7 = load i2, i2* @PauliX, align 1 + %8 = icmp eq i2 %from, %7 + br i1 %8, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condTrue__2 + %9 = load i2, i2* @PauliZ, align 1 + %10 = icmp eq i2 %to, %9 + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condTrue__2 + %11 = phi i1 [ %10, %condTrue__3 ], [ %8, %condTrue__2 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condContinue__1 + %12 = phi i1 [ %11, %condContinue__3 ], [ %5, %condContinue__1 ] + br i1 %12, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__2 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %condContinue__2 + %13 = load i2, i2* @PauliZ, align 1 + %14 = icmp eq i2 %from, %13 + br i1 %14, label %condTrue__4, label %condContinue__4 + +condTrue__4: ; preds = %test2__1 + %15 = load i2, i2* @PauliY, align 1 + %16 = icmp eq i2 %to, %15 + br label %condContinue__4 + +condContinue__4: ; preds = %condTrue__4, %test2__1 + %17 = phi i1 [ %16, %condTrue__4 ], [ %14, %test2__1 ] + br i1 %17, label %then2__1, label %test3__1 + +then2__1: ; preds = %condContinue__4 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + br label %continue__1 + +test3__1: ; preds = %condContinue__4 + %18 = load i2, i2* @PauliY, align 1 + %19 = icmp eq i2 %from, %18 + br i1 %19, label %condTrue__5, label %condContinue__5 + +condTrue__5: ; preds = %test3__1 + %20 = load i2, i2* @PauliZ, align 1 + %21 = icmp eq i2 %to, %20 + br label %condContinue__5 + +condContinue__5: ; preds = 
%condTrue__5, %test3__1 + %22 = phi i1 [ %21, %condTrue__5 ], [ %19, %test3__1 ] + br i1 %22, label %then3__1, label %test4__1 + +then3__1: ; preds = %condContinue__5 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + br label %continue__1 + +test4__1: ; preds = %condContinue__5 + %23 = load i2, i2* @PauliY, align 1 + %24 = icmp eq i2 %from, %23 + br i1 %24, label %condTrue__6, label %condContinue__6 + +condTrue__6: ; preds = %test4__1 + %25 = load i2, i2* @PauliX, align 1 + %26 = icmp eq i2 %to, %25 + br label %condContinue__6 + +condContinue__6: ; preds = %condTrue__6, %test4__1 + %27 = phi i1 [ %26, %condTrue__6 ], [ %24, %test4__1 ] + br i1 %27, label %then4__1, label %test5__1 + +then4__1: ; preds = %condContinue__6 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + br label %continue__1 + +test5__1: ; preds = %condContinue__6 + %28 = load i2, i2* @PauliX, align 1 + %29 = icmp eq i2 %from, %28 + br i1 %29, label %condTrue__7, label %condContinue__7 + +condTrue__7: ; preds = %test5__1 + %30 = load i2, i2* @PauliY, align 1 + %31 = icmp eq i2 %to, %30 + br label %condContinue__7 + +condContinue__7: ; preds = %condTrue__7, %test5__1 + %32 = phi i1 [ %31, %condTrue__7 ], [ %29, %test5__1 ] + br i1 %32, label %then5__1, label %else__1 + +then5__1: ; preds = %condContinue__7 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %condContinue__7 + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__fail(%String* %33) + unreachable + +continue__1: ; preds = %then5__1, %then4__1, %then3__1, %then2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void 
@__quantum__qis__s(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__sadj(%Qubit* %qubit) + ret void +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__fail(%String*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %from, i2 %to) { +entry: + %0 = icmp eq i2 %from, %to + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + br label %continue__1 + +test1__1: ; preds = %entry + %1 = load i2, i2* @PauliZ, align 1 + %2 = icmp eq i2 %from, %1 + br i1 %2, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %test1__1 + %3 = load i2, i2* @PauliX, align 1 + %4 = icmp eq i2 %to, %3 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %test1__1 + %5 = phi i1 [ %4, %condTrue__1 ], [ %2, %test1__1 ] + %6 = xor i1 %5, true + br i1 %6, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %condContinue__1 + %7 = load i2, i2* @PauliX, align 1 + %8 = icmp eq i2 %from, %7 + br i1 %8, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condTrue__2 + %9 = load i2, i2* @PauliZ, align 1 + %10 = icmp eq i2 %to, %9 + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condTrue__2 + %11 = phi i1 [ %10, %condTrue__3 ], [ %8, %condTrue__2 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condContinue__1 + %12 = phi i1 [ %11, %condContinue__3 ], [ %5, %condContinue__1 ] + br i1 %12, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__2 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %condContinue__2 + %13 = load i2, i2* @PauliZ, align 1 + %14 = icmp eq i2 %from, %13 + br i1 %14, label %condTrue__4, label %condContinue__4 + +condTrue__4: ; preds = %test2__1 + %15 = load i2, i2* @PauliY, 
align 1 + %16 = icmp eq i2 %to, %15 + br label %condContinue__4 + +condContinue__4: ; preds = %condTrue__4, %test2__1 + %17 = phi i1 [ %16, %condTrue__4 ], [ %14, %test2__1 ] + br i1 %17, label %then2__1, label %test3__1 + +then2__1: ; preds = %condContinue__4 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + br label %continue__1 + +test3__1: ; preds = %condContinue__4 + %18 = load i2, i2* @PauliY, align 1 + %19 = icmp eq i2 %from, %18 + br i1 %19, label %condTrue__5, label %condContinue__5 + +condTrue__5: ; preds = %test3__1 + %20 = load i2, i2* @PauliZ, align 1 + %21 = icmp eq i2 %to, %20 + br label %condContinue__5 + +condContinue__5: ; preds = %condTrue__5, %test3__1 + %22 = phi i1 [ %21, %condTrue__5 ], [ %19, %test3__1 ] + br i1 %22, label %then3__1, label %test4__1 + +then3__1: ; preds = %condContinue__5 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + br label %continue__1 + +test4__1: ; preds = %condContinue__5 + %23 = load i2, i2* @PauliY, align 1 + %24 = icmp eq i2 %from, %23 + br i1 %24, label %condTrue__6, label %condContinue__6 + +condTrue__6: ; preds = %test4__1 + %25 = load i2, i2* @PauliX, align 1 + %26 = icmp eq i2 %to, %25 + br label %condContinue__6 + +condContinue__6: ; preds = %condTrue__6, %test4__1 + %27 = phi i1 [ %26, %condTrue__6 ], [ %24, %test4__1 ] + br i1 %27, label %then4__1, label %test5__1 + +then4__1: ; preds = %condContinue__6 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + br label %continue__1 + +test5__1: ; preds = %condContinue__6 + %28 = load i2, i2* @PauliX, align 1 + %29 = icmp eq i2 %from, %28 + br i1 %29, label %condTrue__7, label %condContinue__7 + +condTrue__7: ; preds = %test5__1 + %30 = load 
i2, i2* @PauliY, align 1 + %31 = icmp eq i2 %to, %30 + br label %condContinue__7 + +condContinue__7: ; preds = %condTrue__7, %test5__1 + %32 = phi i1 [ %31, %condTrue__7 ], [ %29, %test5__1 ] + br i1 %32, label %then5__1, label %else__1 + +then5__1: ; preds = %condContinue__7 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %condContinue__7 + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @1, i32 0, i32 0)) + call void @__quantum__rt__fail(%String* %33) + unreachable + +continue__1: ; preds = %then5__1, %then4__1, %then3__1, %then2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control1, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control1, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) { +entry: + call void 
@__quantum__qis__tadj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control1, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control1, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to %Qubit** + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 1) + %4 = bitcast i8* %3 to %Qubit** + store %Qubit* %control1, %Qubit** %2, align 8 + store %Qubit* %control2, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %0, %Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* 
%qubit) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__z(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %control = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__cz(%Qubit* %control, %Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %7 = icmp eq i64 %6, 2 + br i1 %7, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %10) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %16 = load %Qubit*, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %qubit, %Qubit* %16) + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %18 = bitcast i8* %17 to %Qubit** + %19 = load %Qubit*, %Qubit** %18, align 8 + call void 
@Microsoft__Quantum__Intrinsic__T__body(%Qubit* %19) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %22, %Qubit* %qubit) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %24 = bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %27 = bitcast i8* %26 to %Qubit** + %28 = load %Qubit*, %Qubit** %27, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %25, %Qubit* %28) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %30 = bitcast i8* %29 to %Qubit** + %31 = load %Qubit*, %Qubit** %30, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %31) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %34, %Qubit* %qubit) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %qubit, %Qubit* %37) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %39 = bitcast i8* %38 to %Qubit** + %40 = load %Qubit*, %Qubit** %39, align 8 + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %40) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %42 = bitcast i8* %41 to %Qubit** + %43 = load %Qubit*, %Qubit** %42, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%ctls, i64 0) + %45 = bitcast i8* %44 to %Qubit** + %46 = load %Qubit*, %Qubit** %45, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %43, %Qubit* %46) + br label %continue__1 + +else__1: ; preds = %test2__1 + %47 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Z, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %47) + %48 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %49 = bitcast %Tuple* %48 to { %Array*, %Qubit* }* + %50 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %49, i32 0, i32 0 + %51 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %49, i32 0, i32 1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + store %Array* %ctls, %Array** %50, align 
8 + store %Qubit* %qubit, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %47, { %Array*, %Qubit* }* %49) + call void @__quantum__rt__capture_update_reference_count(%Callable* %47, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %47, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__adj(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control1 = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %control2 = load %Qubit*, %Qubit** %2, align 8 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %6 = bitcast i8* %5 to %Qubit** + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 1) + %8 = bitcast i8* %7 to %Qubit** + 
store %Qubit* %control1, %Qubit** %6, align 8 + store %Qubit* %control2, %Qubit** %8, align 8 + %9 = call %Array* @__quantum__rt__array_concatenate(%Array* %ctls, %Array* %4) + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %9, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %control = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %7 = icmp eq i64 %6, 2 + br i1 %7, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %10, %Qubit* %13, %Qubit* %qubit) + br label 
%continue__1 + +else__1: ; preds = %test2__1 + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) call void @__quantum__rt__callable_make_controlled(%Callable* %14) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* 
%onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + store %Array* %ctls, %Array** %17, align 8 + store %Qubit* %qubit, %Qubit** %18, align 8 + call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %14, { %Array*, %Qubit* }* %16) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) ret void } -define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %0) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control1 = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %control2 = load %Qubit*, %Qubit** %2, align 8 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %5 = bitcast %Tuple* %4 to { %Qubit*, %Qubit*, %Qubit* }* + %6 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 2 + store %Qubit* %control1, %Qubit** %6, align 8 + store %Qubit* %control2, %Qubit** %7, align 8 + store %Qubit* %target, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic___c1b23cd4538f4bf9ab69d9c3e574aa70___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %controls = 
load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %arg = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + %3 = bitcast { %Qubit*, %Qubit* }* %arg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %numControlPairs = sdiv i64 %numControls, 2 + %temps = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numControlPairs) + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) + %4 = sub i64 %numControlPairs, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %5 = icmp sle i64 %__qsVar0__numPair__, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = mul i64 2, %__qsVar0__numPair__ + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = mul i64 2, %__qsVar0__numPair__ + %11 = add i64 %10, 1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %__qsVar0__numPair__, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = srem i64 %numControls, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + 
+condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + %24 = sub i64 %numControls, 1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) + %26 = bitcast i8* %25 to %Qubit** + %27 = load %Qubit*, %Qubit** %26, align 8 + store %Qubit* %27, %Qubit** %23, align 8 + %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %28, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %30 = bitcast %Tuple* %29 to { %Array*, { %Qubit*, %Qubit* }* }* + %31 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %30, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + store %Array* %__qsVar1__newControls__, %Array** %31, align 8 + store { %Qubit*, %Qubit* }* %arg, { %Qubit*, %Qubit* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* 
%op, %Tuple* %29, %Tuple* null) + %33 = sub i64 %numControlPairs, 1 + %34 = sub i64 %33, 0 + %35 = sdiv i64 %34, 1 + %36 = mul i64 1, %35 + %37 = add i64 0, %36 + %38 = load %Range, %Range* @EmptyRange, align 4 + %39 = insertvalue %Range %38, i64 %37, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0__numPair____ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %44 + %47 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) + %51 = bitcast i8* %50 to %Qubit** + %52 = load %Qubit*, %Qubit** %51, align 8 + %53 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %54 = add i64 %53, 1 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) + %56 = bitcast i8* %55 to %Qubit** + %57 = load %Qubit*, %Qubit** %56, align 8 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) + %59 = bitcast i8* %58 to %Qubit** + %60 = load %Qubit*, %Qubit** %59, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %61 = add i64 %__qsVar0____qsVar0__numPair____, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %temps) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %3, { %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %3, { %Qubit*, %Qubit* }* %4) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = 
getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %arg = load %Qubit*, %Qubit** %2, align 8 + %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %numControlPairs = sdiv i64 %numControls, 2 + %temps = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numControlPairs) + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) + %3 = sub i64 %numControlPairs, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %4 = icmp sle i64 %__qsVar0__numPair__, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = mul i64 2, %__qsVar0__numPair__ + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %5) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %9 = mul i64 2, %__qsVar0__numPair__ + %10 = add i64 %9, 1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %10) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) + %15 = bitcast i8* %14 to %Qubit** + %16 = load %Qubit*, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %8, %Qubit* %13, %Qubit* %16) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %__qsVar0__numPair__, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = srem i64 %numControls, 2 + %19 = icmp eq i64 %18, 0 + br i1 %19, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void 
@__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %20 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 0) + %22 = bitcast i8* %21 to %Qubit** + %23 = sub i64 %numControls, 1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %23) + %25 = bitcast i8* %24 to %Qubit** + %26 = load %Qubit*, %Qubit** %25, align 8 + store %Qubit* %26, %Qubit** %22, align 8 + %27 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %20) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %27, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %29 = bitcast %Tuple* %28 to { %Array*, %Qubit* }* + %30 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %29, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) + store %Array* %__qsVar1__newControls__, %Array** %30, align 8 + store %Qubit* %arg, %Qubit** %31, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %28, %Tuple* null) + %32 = sub i64 %numControlPairs, 1 + %33 = sub i64 %32, 0 + %34 = sdiv i64 %33, 1 + %35 = mul i64 1, %34 + %36 = add i64 0, %35 + %37 = load %Range, %Range* @EmptyRange, align 4 + %38 = 
insertvalue %Range %37, i64 %36, 0 + %39 = insertvalue %Range %38, i64 -1, 1 + %40 = insertvalue %Range %39, i64 0, 2 + %41 = extractvalue %Range %40, 0 + %42 = extractvalue %Range %40, 1 + %43 = extractvalue %Range %40, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %44 = icmp sgt i64 %42, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0__numPair____ = phi i64 [ %41, %preheader__1 ], [ %60, %exiting__2 ] + %45 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %43 + %46 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %43 + %47 = select i1 %44, i1 %45, i1 %46 + br i1 %47, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %48 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %48) + %50 = bitcast i8* %49 to %Qubit** + %51 = load %Qubit*, %Qubit** %50, align 8 + %52 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %53 = add i64 %52, 1 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %53) + %55 = bitcast i8* %54 to %Qubit** + %56 = load %Qubit*, %Qubit** %55, align 8 + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) + %58 = bitcast i8* %57 to %Qubit** + %59 = load %Qubit*, %Qubit** %58, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %51, %Qubit* %56, %Qubit* %59) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %__qsVar0____qsVar0__numPair____, %42 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* 
%__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %temps) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = 
getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @__quantum__qis__m__body(%Qubit* %qubit) + ret %Result* %0 +} + +declare %Result* @__quantum__qis__m__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + %0 = load i2, i2* @PauliX, align 1 + %1 = icmp eq i2 %pauli, %0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = load i2, i2* @PauliY, align 1 + %3 = icmp eq i2 %pauli, %2 + br i1 %3, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %4 = load i2, i2* @PauliZ, align 1 + %5 = icmp eq i2 %pauli, %4 + br i1 %5, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test2__1 + %6 = fneg double %theta + %7 = fdiv double %6, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____body(double %7) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__rx(double %theta, %Qubit* %qubit) + ret void +} + +define 
internal void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__ry(double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__rz(double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + %0 = load i2, i2* @PauliX, align 1 + %1 = icmp eq i2 %pauli, %0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = load i2, i2* @PauliY, align 1 + %3 = icmp eq i2 %pauli, %2 + br i1 %3, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %4 = load i2, i2* @PauliZ, align 1 + %5 = icmp eq i2 %pauli, %4 + br i1 %5, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + call void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test2__1 + %6 = fneg double %theta + %7 = fdiv double %6, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____adj(double %7) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qubit) { +entry: + %0 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %0, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qubit) { +entry: + %0 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__Ry__body(double %0, 
%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qubit) { +entry: + %0 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %0, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = load i2, i2* @PauliX, align 1 + %5 = icmp eq i2 %pauli, %4 + br i1 %5, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %theta, double* %8, align 8 + store %Qubit* %qubit, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %7) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = load i2, i2* @PauliY, align 1 + %11 = icmp eq i2 %pauli, %10 + br i1 %11, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ 
double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Qubit* }* + %14 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 1 + store double %theta, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %13) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %16 = load i2, i2* @PauliZ, align 1 + %17 = icmp eq i2 %pauli, %16 + br i1 %17, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Qubit* }* + %20 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 1 + store double %theta, double* %20, align 8 + store %Qubit* %qubit, %Qubit** %21, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %19) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +else__1: ; preds = %test2__1 + %22 = fneg double %theta + %23 = fdiv double %22, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctl(%Array* %__controlQubits__, double %23) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + 
call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %4 = icmp eq i64 %3, 0 + br i1 %4, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__rx(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %6 = icmp eq i64 %5, 1 + br i1 %6, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %7 = load i2, i2* @PauliZ, align 1 + %8 = load i2, i2* @PauliX, align 1 + call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %7, i2 %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { double, %Qubit* }* + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 + store double %theta, double* %11, align 8 + store %Qubit* %qubit, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %10) + %13 = load i2, i2* @PauliZ, align 1 + %14 = load i2, i2* @PauliX, align 1 + call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %13, i2 %14) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rx, [2 x void 
(%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %15) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %17 = bitcast %Tuple* %16 to { %Array*, { double, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Qubit* }* + %22 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 1 + store double %theta, double* %22, align 8 + store %Qubit* %qubit, %Qubit** %23, align 8 + store %Array* %ctls, %Array** %18, align 8 + store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 + call void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal 
void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %4 = icmp eq i64 %3, 0 + br i1 %4, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__ry(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %6 = icmp eq i64 %5, 1 + br i1 %6, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %7 = load i2, i2* @PauliZ, align 1 + %8 = load i2, i2* @PauliY, align 1 + call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %7, i2 %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { double, %Qubit* }* + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 + store double %theta, double* %11, align 8 + store %Qubit* %qubit, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %10) + %13 = load i2, i2* @PauliZ, align 1 + %14 = load i2, i2* @PauliY, align 1 + call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %13, i2 %14) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + %15 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Ry, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %15) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %17 = bitcast %Tuple* %16 to { %Array*, { double, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Qubit* }* + %22 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 1 + store double %theta, double* %22, align 8 + store %Qubit* %qubit, %Qubit** %23, align 8 + store %Array* %ctls, %Array** %18, align 8 + store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 + call void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, 
%then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %4 = icmp eq i64 %3, 0 + br i1 %4, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %6 = icmp eq i64 %5, 1 + br i1 %6, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %7 = fdiv double %theta, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %7, %Qubit* %qubit) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %qubit) + %11 = fneg double %theta + %12 = fdiv double %11, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %12, %Qubit* %qubit) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %15, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rz, [2 x void (%Tuple*, i32)*]* null, 
%Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %16) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %18 = bitcast %Tuple* %17 to { %Array*, { double, %Qubit* }* }* + %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %18, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { double, %Qubit* }* + %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %22, i32 0, i32 1 + store double %theta, double* %23, align 8 + store %Qubit* %qubit, %Qubit** %24, align 8 + store %Array* %ctls, %Array** %19, align 8 + store { double, %Qubit* }* %22, { double, %Qubit* }** %20, align 8 + call void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____body(%Callable* %16, { %Array*, { double, %Qubit* }* }* %18) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = load i2, i2* @PauliX, align 1 + %5 = icmp eq i2 %pauli, %4 + br i1 %5, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %theta, double* %8, align 8 + store %Qubit* %qubit, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %7) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = load i2, i2* @PauliY, align 1 + %11 = icmp eq i2 %pauli, %10 + br i1 %11, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Qubit* }* + %14 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* 
}* %13, i32 0, i32 1 + store double %theta, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %13) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %16 = load i2, i2* @PauliZ, align 1 + %17 = icmp eq i2 %pauli, %16 + br i1 %17, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Qubit* }* + %20 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 1 + store double %theta, double* %20, align 8 + store %Qubit* %qubit, %Qubit** %21, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %19) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +else__1: ; preds = %test2__1 + %22 = fneg double %theta + %23 = fdiv double %22, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctladj(%Array* %__controlQubits__, double %23) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { 
double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { double, %Qubit* }* + %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 + %7 = fneg double %theta + store double %7, double* %5, align 8 + store %Qubit* %qubit, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 
1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { double, %Qubit* }* + %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 + %7 = fneg double %theta + store double %7, double* %5, align 8 + store %Qubit* %qubit, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) ret void } -define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast 
%Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, 
%Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { double, %Qubit* }* + %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 + %7 = fneg double %theta + store double %7, double* %5, align 8 + store %Qubit* %qubit, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) ret void } -declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) - -declare void @__quantum__rt__callable_make_controlled(%Callable*) - -declare void @__quantum__rt__array_update_reference_count(%Array*, i32) - -define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +define internal void @Microsoft__Quantum__Intrinsic__R1__body(double %theta, %Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 
%count-change) + %0 = load i2, i2* @PauliZ, align 1 + call void @Microsoft__Quantum__Intrinsic__R__body(i2 %0, double %theta, %Qubit* %qubit) + %1 = load i2, i2* @PauliI, align 1 + %2 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__R__body(i2 %1, double %2, %Qubit* %qubit) ret void } -define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +define internal void @Microsoft__Quantum__Intrinsic__R1__adj(double %theta, %Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + %0 = load i2, i2* @PauliI, align 1 + %1 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %0, double %1, %Qubit* %qubit) + %2 = load i2, i2* @PauliZ, align 1 + call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %2, double %theta, %Qubit* %qubit) ret void } -define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 %numerator, i64 %power, %Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %0 = load i2, i2* @PauliZ, align 1 + %1 = sub i64 0, %numerator + %2 = add i64 %power, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %0, i64 %1, i64 %2, %Qubit* %qubit) + %3 = load i2, i2* @PauliI, align 1 + %4 = add i64 %power, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %3, i64 %numerator, i64 %4, %Qubit* %qubit) ret void } -define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %pauli, i64 %numerator, i64 %power, %Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** 
getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %0 = call double @Microsoft__Quantum__Math__PI__body() + %1 = fmul double -2.000000e+00, %0 + %2 = sitofp i64 %numerator to double + %3 = fmul double %1, %2 + %4 = sitofp i64 %power to double + %5 = call double @llvm.pow.f64(double 2.000000e+00, double %4) + %angle = 
fdiv double %3, %5 + call void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %angle, %Qubit* %qubit) ret void } -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) - -declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) - call void 
@__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 %numerator, i64 %power, %Qubit* %qubit) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* 
%onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void 
@__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + %0 = load i2, i2* @PauliI, align 1 + %1 = add i64 %power, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %0, i64 %numerator, i64 %1, %Qubit* %qubit) + %2 = load i2, i2* @PauliZ, align 1 + %3 = sub i64 0, %numerator + %4 = add i64 %power, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %2, i64 %3, i64 %4, %Qubit* %qubit) ret void } -define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %pauli, i64 %numerator, i64 %power, %Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %0 = call double @Microsoft__Quantum__Math__PI__body() + %1 = fmul double -2.000000e+00, %0 + %2 = sitofp i64 %numerator to double + %3 = fmul 
double %1, %2 + %4 = sitofp i64 %power to double + %5 = call double @llvm.pow.f64(double 2.000000e+00, double %4) + %__qsVar0__angle__ = fdiv double %3, %5 + call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %__qsVar0__angle__, %Qubit* %qubit) ret void } -define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__R1Frac__ctl(%Array* %__controlQubits__, { i64, i64, %Qubit* }* %0) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = 
getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 0 + %numerator = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 1 + %power = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, i64, i64, %Qubit* }* + %6 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 2 + %9 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 3 + %10 = load i2, i2* @PauliZ, align 1 + %11 = sub i64 0, %numerator + %12 = add i64 %power, 1 + store i2 %10, i2* %6, align 1 + store i64 %11, i64* %7, align 4 + store i64 %12, i64* %8, align 4 + store %Qubit* %qubit, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %5) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { i2, i64, i64, %Qubit* }* + %15 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 1 + %17 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 2 + %18 = getelementptr inbounds { i2, i64, i64, %Qubit* }, 
{ i2, i64, i64, %Qubit* }* %14, i32 0, i32 3 + %19 = load i2, i2* @PauliI, align 1 + %20 = add i64 %power, 1 + store i2 %19, i2* %15, align 1 + store i64 %numerator, i64* %16, align 4 + store i64 %20, i64* %17, align 4 + store %Qubit* %qubit, %Qubit** %18, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %14) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) ret void } -define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %0) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** 
null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 1 + %numerator = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 2 + %power = load i64, i64* %3, align 4 + %4 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 3 + %qubit = load %Qubit*, %Qubit** %4, align 8 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fmul double -2.000000e+00, %5 + 
%7 = sitofp i64 %numerator to double + %8 = fmul double %6, %7 + %9 = sitofp i64 %power to double + %10 = call double @llvm.pow.f64(double 2.000000e+00, double %9) + %angle = fdiv double %8, %10 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, double, %Qubit* }* + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 2 + store i2 %pauli, i2* %13, align 1 + store double %angle, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) ret void } -define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__R1Frac__ctladj(%Array* %__controlQubits__, { i64, i64, %Qubit* }* %0) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw 
(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 0 + %numerator = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 1 + %power = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* 
%0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, i64, i64, %Qubit* }* + %6 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 2 + %9 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 3 + %10 = load i2, i2* @PauliI, align 1 + %11 = add i64 %power, 1 + store i2 %10, i2* %6, align 1 + store i64 %numerator, i64* %7, align 4 + store i64 %11, i64* %8, align 4 + store %Qubit* %qubit, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %5) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i2, i64, i64, %Qubit* }* + %14 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 3 + %18 = load i2, i2* @PauliZ, align 1 + %19 = sub i64 0, %numerator + %20 = add i64 %power, 1 + store i2 %18, i2* %14, align 1 + store i64 %19, i64* %15, align 4 + store i64 %20, i64* %16, align 4 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* 
%__controlQubits__, { i2, i64, i64, %Qubit* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) ret void } -define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +define internal void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %0) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 1 + %numerator = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 2 + %power = load i64, i64* %3, align 4 + %4 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { 
i2, i64, i64, %Qubit* }* %0, i32 0, i32 3 + %qubit = load %Qubit*, %Qubit** %4, align 8 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fmul double -2.000000e+00, %5 + %7 = sitofp i64 %numerator to double + %8 = fmul double %6, %7 + %9 = sitofp i64 %power to double + %10 = call double @llvm.pow.f64(double 2.000000e+00, double %9) + %__qsVar0__angle__ = fdiv double %8, %10 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, double, %Qubit* }* + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 2 + store i2 %pauli, i2* %13, align 1 + store double %__qsVar0__angle__, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) ret void } -define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, 
%Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + call void @__quantum__qis__reset__body(%Qubit* %qubit) ret void } -define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +declare void @__quantum__qis__reset__body(%Qubit*) + +; Function Attrs: nounwind readnone speculatable willreturn +declare double @llvm.pow.f64(double, double) #0 + +declare void @__quantum__qis__rx(double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { 
double, %Qubit* }* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %arg = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + %3 = bitcast { double, %Qubit* }* %arg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %numControlPairs = sdiv i64 %numControls, 2 + %temps = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numControlPairs) + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) + %4 = sub i64 %numControlPairs, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %5 = icmp sle i64 %__qsVar0__numPair__, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = mul i64 2, %__qsVar0__numPair__ + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = mul i64 2, %__qsVar0__numPair__ + %11 = add i64 %10, 1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %__qsVar0__numPair__, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = srem i64 %numControls, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, 
label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + %24 = sub i64 %numControls, 1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) + %26 = bitcast i8* %25 to %Qubit** + %27 = load %Qubit*, %Qubit** %26, align 8 + store %Qubit* %27, %Qubit** %23, align 8 + %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %28, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %30 = bitcast %Tuple* %29 to { %Array*, { double, %Qubit* }* }* + %31 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %30, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + store %Array* %__qsVar1__newControls__, %Array** %31, align 8 + store { double, %Qubit* }* %arg, { double, %Qubit* }** %32, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %29, %Tuple* null) + %33 = sub i64 %numControlPairs, 1 + %34 = sub i64 %33, 0 + %35 = sdiv i64 %34, 1 + %36 = mul i64 1, %35 + %37 = add i64 0, %36 + %38 = load %Range, %Range* @EmptyRange, align 4 + %39 = insertvalue %Range %38, i64 %37, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0__numPair____ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %44 + %47 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) + %51 = bitcast i8* %50 to %Qubit** + %52 = load %Qubit*, %Qubit** %51, align 8 + %53 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %54 = add i64 %53, 1 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) + %56 = bitcast i8* %55 to %Qubit** + %57 = load %Qubit*, %Qubit** %56, align 8 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) + %59 = bitcast i8* %58 to %Qubit** + %60 = load %Qubit*, %Qubit** %59, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %61 = add i64 %__qsVar0____qsVar0__numPair____, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %temps) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) ret void } -define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Rx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Rx__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { 
%Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, 
%Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* 
%9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %3, { double, %Qubit* }* %4) ret void } -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctladj(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void 
@__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5, [2 x void (%Tuple*, i32)*]* 
@MemoryManagement__2, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %14) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %3, { double, %Qubit* }* %4) ret void } -define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +declare void @__quantum__qis__ry(double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Ry__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__body(double %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Ry__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** 
null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__adj(double %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Ry__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast 
%Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %3, { double, %Qubit* }* %4) ret void } -define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Ry__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %3, { double, %Qubit* }* %4) ret void } -define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +declare void @__quantum__qis__rz(double, %Qubit*) + +define internal void 
@Microsoft__Quantum__Intrinsic__Rz__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Rz__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = 
call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__adj(double %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = 
getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) 
- call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %3, { double, %Qubit* }* %4) ret void } -define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { 
%Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -declare void @__quantum__qis__applyifelseintrinsic__body(%Result*, %Callable*, %Callable*) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, 
i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* 
%onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +declare void @__quantum__qis__s(%Qubit*) + +declare void @__quantum__qis__sadj(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qubit) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult 
= load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 
mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__s(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %6) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S, [2 x void (%Tuple*, i32)*]* null, 
%Tuple* null) call void @__quantum__rt__callable_make_controlled(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + store %Array* %ctls, %Array** %16, align 8 + store %Qubit* %qubit, 
%Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) ret void } -define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) ret void } -define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { 
%Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) ret void } -define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr 
(i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* 
- %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void 
@Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, 
i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qubit) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__sadj(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = 
call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %6) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) call void @__quantum__rt__callable_make_controlled(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void 
(%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + store %Array* %ctls, %Array** %16, align 8 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) ret void } -define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} +declare void @__quantum__rt__callable_make_adjoint(%Callable*) -define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** 
getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} +declare void @__quantum__qis__t(%Qubit*) -define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 
= getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} +declare void @__quantum__qis__tadj(%Qubit*) -define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* 
%result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__t(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %6) + call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %qubit) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %qubit) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__T, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %16, align 8 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) ret void } -define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__T__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, 
%Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %2) ret void } -define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__T__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, 
i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %2) ret void } -define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__T__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__T__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = 
getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %3, %Qubit* %4) ret void } -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctladj(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qubit) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = 
call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %8) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void 
@__quantum__qis__tadj(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %6) + call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %qubit) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %qubit) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__T, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) call void @__quantum__rt__callable_make_adjoint(%Callable* %13) call void @__quantum__rt__callable_make_controlled(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, 
%Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + store %Array* %ctls, %Array** %16, align 8 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +continue__1: ; preds = 
%else__1, %then1__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) ret void } -define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) ret void } -define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) ret void } -define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = 
getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) ret void } -define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, 
%Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { 
%Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) ret void } -define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, 
%Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) ret void } -define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 
mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) ret void } -define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define internal void 
@Microsoft__Quantum__Intrinsic__Z__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void 
@__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %2) ret void } -define internal { %String*, %String* }* @Microsoft__Quantum__Targeting__RequiresCapability__body(%String* %Level, %String* %Reason) { +define internal void @Microsoft__Quantum__Intrinsic__Z__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %1 = bitcast %Tuple* %0 to { %String*, %String* }* - %2 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 1 - store %String* %Level, %String** %2, align 8 - store %String* %Reason, %String** %3, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %Level, i32 1) - call void @__quantum__rt__string_update_reference_count(%String* %Reason, i32 1) - ret { %String*, %String* }* %1 + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %2) + ret void } -define internal { 
%String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %__Item1__, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) - ret { %String* }* %1 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %3, %Qubit* %4) + ret void } -define void @Feasibility__QubitMapping__Interop() #0 { +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @Feasibility__QubitMapping__body() + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %3, %Qubit* %4) ret void } -define void @Feasibility__QubitMapping() #1 { +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { entry: - call void @Feasibility__QubitMapping__body() + call void @__quantum__rt__array_update_alias_count(%Array* 
%__controlQubits__, i32 1) + call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +define internal void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %arg = load %Qubit*, %Qubit** %2, align 8 + %__qsVar0__numControls__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 + %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 1) + %3 = sub i64 %__qsVar1__numControlPairs__, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %4 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %5) + %7 = bitcast i8* %6 to 
%Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %9 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %10 = add i64 %9, 1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %10) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) + %15 = bitcast i8* %14 to %Qubit** + %16 = load %Qubit*, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %8, %Qubit* %13, %Qubit* %16) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %__qsVar0____qsVar3__numPair____, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = srem i64 %__qsVar0__numControls__, 2 + %19 = icmp eq i64 %18, 0 + br i1 %19, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %20 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 0) + %22 = bitcast i8* %21 to %Qubit** + %23 = sub i64 %__qsVar0__numControls__, 1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %23) + %25 = bitcast i8* %24 to %Qubit** + %26 = load %Qubit*, %Qubit** %25, align 8 + store %Qubit* %26, %Qubit** %22, align 8 + %27 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar2__temps__, %Array* %20) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1____qsVar4__newControls____ = phi %Array* [ 
%__qsVar2__temps__, %condTrue__1 ], [ %27, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + %28 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %28) + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %30 = bitcast %Tuple* %29 to { %Array*, %Qubit* }* + %31 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %30, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + store %Array* %__qsVar1____qsVar4__newControls____, %Array** %31, align 8 + store %Qubit* %arg, %Qubit** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %28, %Tuple* %29, %Tuple* null) + %33 = sub i64 %__qsVar1__numControlPairs__, 1 + %34 = sub i64 %33, 0 + %35 = sdiv i64 %34, 1 + %36 = mul i64 1, %35 + %37 = add i64 0, %36 + %38 = load %Range, %Range* @EmptyRange, align 4 + %39 = insertvalue %Range %38, i64 %37, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 + %47 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label 
%exit__2 + +body__2: ; preds = %header__2 + %49 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) + %51 = bitcast i8* %50 to %Qubit** + %52 = load %Qubit*, %Qubit** %51, align 8 + %53 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %54 = add i64 %53, 1 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) + %56 = bitcast i8* %55 to %Qubit** + %57 = load %Qubit*, %Qubit** %56, align 8 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) + %59 = bitcast i8* %58 to %Qubit** + %60 = load %Qubit*, %Qubit** %59, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %61 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, 
i32 -1) ret void } -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %arg = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + %3 = bitcast { double, %Qubit* }* %arg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %__qsVar0__numControls__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 + %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 1) + %4 = sub i64 %__qsVar1__numControlPairs__, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %5 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) + %8 = bitcast i8* %7 to %Qubit** + %9 = 
load %Qubit*, %Qubit** %8, align 8 + %10 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %11 = add i64 %10, 1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %__qsVar0____qsVar3__numPair____, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = srem i64 %__qsVar0__numControls__, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + %24 = sub i64 %__qsVar0__numControls__, 1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) + %26 = bitcast i8* %25 to %Qubit** + %27 = load %Qubit*, %Qubit** %26, align 8 + store %Qubit* %27, %Qubit** %23, align 8 + %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar2__temps__, %Array* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1____qsVar4__newControls____ = phi %Array* [ %__qsVar2__temps__, 
%condTrue__1 ], [ %28, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %31 = bitcast %Tuple* %30 to { %Array*, { double, %Qubit* }* }* + %32 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %31, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + store %Array* %__qsVar1____qsVar4__newControls____, %Array** %32, align 8 + store { double, %Qubit* }* %arg, { double, %Qubit* }** %33, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) + %34 = sub i64 %__qsVar1__numControlPairs__, 1 + %35 = sub i64 %34, 0 + %36 = sdiv i64 %35, 1 + %37 = mul i64 1, %36 + %38 = add i64 0, %37 + %39 = load %Range, %Range* @EmptyRange, align 4 + %40 = insertvalue %Range %39, i64 %38, 0 + %41 = insertvalue %Range %40, i64 -1, 1 + %42 = insertvalue %Range %41, i64 0, 2 + %43 = extractvalue %Range %42, 0 + %44 = extractvalue %Range %42, 1 + %45 = extractvalue %Range %42, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %46 = icmp sgt i64 %44, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %43, %preheader__1 ], [ %62, %exiting__2 ] + %47 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 + 
%48 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 + %49 = select i1 %46, i1 %47, i1 %48 + br i1 %49, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %50 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %50) + %52 = bitcast i8* %51 to %Qubit** + %53 = load %Qubit*, %Qubit** %52, align 8 + %54 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %55 = add i64 %54, 1 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %55) + %57 = bitcast i8* %56 to %Qubit** + %58 = load %Qubit*, %Qubit** %57, align 8 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) + %60 = bitcast i8* %59 to %Qubit** + %61 = load %Qubit*, %Qubit** %60, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %53, %Qubit* %58, %Qubit* %61) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %62 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic___c1b23cd4538f4bf9ab69d9c3e574aa70___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %arg = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + %3 = bitcast { %Qubit*, %Qubit* }* %arg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %__qsVar0__numControls__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 + %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 1) + %4 = sub i64 %__qsVar1__numControlPairs__, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %5 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %7 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %11 = add i64 %10, 1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %__qsVar0____qsVar3__numPair____, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = srem i64 %__qsVar0__numControls__, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + %24 = sub i64 %__qsVar0__numControls__, 1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) + %26 = bitcast i8* %25 to %Qubit** + %27 = load %Qubit*, %Qubit** %26, align 8 + store %Qubit* %27, %Qubit** %23, align 8 + %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar2__temps__, %Array* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = 
%condFalse__1, %condTrue__1 + %__qsVar1____qsVar4__newControls____ = phi %Array* [ %__qsVar2__temps__, %condTrue__1 ], [ %28, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %31 = bitcast %Tuple* %30 to { %Array*, { %Qubit*, %Qubit* }* }* + %32 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %31, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + store %Array* %__qsVar1____qsVar4__newControls____, %Array** %32, align 8 + store { %Qubit*, %Qubit* }* %arg, { %Qubit*, %Qubit* }** %33, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) + %34 = sub i64 %__qsVar1__numControlPairs__, 1 + %35 = sub i64 %34, 0 + %36 = sdiv i64 %35, 1 + %37 = mul i64 1, %36 + %38 = add i64 0, %37 + %39 = load %Range, %Range* @EmptyRange, align 4 + %40 = insertvalue %Range %39, i64 %38, 0 + %41 = insertvalue %Range %40, i64 -1, 1 + %42 = insertvalue %Range %41, i64 0, 2 + %43 = extractvalue %Range %42, 0 + %44 = extractvalue %Range %42, 1 + %45 = extractvalue %Range %42, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %46 = icmp sgt i64 %44, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %43, 
%preheader__1 ], [ %62, %exiting__2 ] + %47 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 + %48 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 + %49 = select i1 %46, i1 %47, i1 %48 + br i1 %49, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %50 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %50) + %52 = bitcast i8* %51 to %Qubit** + %53 = load %Qubit*, %Qubit** %52, align 8 + %54 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %55 = add i64 %54, 1 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %55) + %57 = bitcast i8* %56 to %Qubit** + %58 = load %Qubit*, %Qubit** %57, align 8 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) + %60 = bitcast i8* %59 to %Qubit** + %61 = load %Qubit*, %Qubit** %60, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %53, %Qubit* %58, %Qubit* %61) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %62 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + ret void +} + +define { i8, i8 }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() #1 { +entry: + %0 = call { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i32 0, i32 1 + %3 = load %Result*, %Result** %1, align 8 + %4 = load %Result*, %Result** %2, align 8 + %5 = call %Result* @__quantum__rt__result_get_zero() + %6 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %5) + %7 = select i1 %6, i8 0, i8 -1 + %8 = call %Result* @__quantum__rt__result_get_zero() + %9 = call i1 @__quantum__rt__result_equal(%Result* %4, %Result* %8) + %10 = select i1 %9, i8 0, i8 -1 + %11 = call i8* @__quantum__rt__memory_allocate(i64 mul nuw (i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64), i64 2)) + %12 = bitcast i8* %11 to { i8, i8 }* + %13 = getelementptr { i8, i8 }, { i8, i8 }* %12, i64 0, i32 0 + store i8 %7, i8* %13, align 1 + %14 = getelementptr { i8, i8 }, { i8, i8 }* %12, i64 0, i32 1 + store i8 %10, i8* %14, align 1 + call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) + %15 = bitcast { %Result*, %Result* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret { i8, i8 }* %12 +} + +declare %Result* 
@__quantum__rt__result_get_zero() + +declare i8* @__quantum__rt__memory_allocate(i64) + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() #2 { +entry: + %0 = call { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i32 0, i32 0)) + %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i32 0, i32 0 + %3 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i32 0, i32 1 + %4 = load %Result*, %Result** %2, align 8 + %5 = load %Result*, %Result** %3, align 8 + %6 = call %String* @__quantum__rt__result_to_string(%Result* %4) + %7 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %6) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @3, i32 0, i32 0)) + %9 = call %String* @__quantum__rt__string_concatenate(%String* %7, %String* %8) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %5) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @4, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__message(%String* %13) + call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i32 -1) + %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) + +declare %String* @__quantum__rt__result_to_string(%Result*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +attributes #0 = { nounwind readnone speculatable willreturn } +attributes #1 = { "InteropFriendly" } +attributes #2 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index 6ceb4d6776..36581a421e 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -1,51 +1,274 @@ ; ModuleID = 'qir/ConstSizeArray.ll' source_filename = "qir/ConstSizeArray.ll" -%Array = type opaque %Qubit = type opaque +%Result = type opaque +%Array = type opaque +%Tuple = type opaque +%String = type opaque + +@0 = internal constant [2 x i8] c"(\00" +@1 = internal constant [3 x i8] c", \00" +@2 = internal constant [2 x i8] c")\00" -define internal fastcc void @Feasibility__QubitMapping__body() unnamed_addr { +define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 1) - %0 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %qubit = load %Qubit*, %Qubit** %1, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit) - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %3 = bitcast i8* %2 to %Qubit** - %qubit.1 = load %Qubit*, %Qubit** %3, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit.1) - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %5 = bitcast i8* %4 to %Qubit** - %qubit.2 = load %Qubit*, %Qubit** %5, align 8 - call void @__quantum__qis__x__body(%Qubit* %qubit.2) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qs) + %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %3 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) + %4 = call %Result* @__quantum__rt__result_get_one() + %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) + call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) + br i1 %5, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + ret void +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* 
%target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__z(%Qubit* %qubit) ret void } +define internal fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__x(%Qubit* %qubit) + ret void +} + +define internal fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +entry: + %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) + %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %4 = bitcast i8* %3 to %Qubit** + %5 = load %Qubit*, %Qubit** %4, align 8 + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %5) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %9 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10, align 8 + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %8, %Qubit* %11) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %14, %Qubit* %17) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %19 = bitcast i8* %18 to %Qubit** + %20 = load %Qubit*, %Qubit** %19, align 8 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) + %22 = bitcast i8* %21 to %Qubit** + %23 = load %Qubit*, %Qubit** %22, align 8 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %25 = bitcast i8* %24 to %Qubit** + %26 = load %Qubit*, %Qubit** %25, align 8 + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 16) + %28 = bitcast %Tuple* %27 to { %Result*, %Result* }* + %29 = bitcast %Tuple* %27 to %Result** + %30 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %28, i64 0, i32 1 + %31 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33, align 8 + %35 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %34) + store %Result* %31, %Result** %29, align 8 + store 
%Result* %35, %Result** %30, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) + call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) + call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) + call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) + ret { %Result*, %Result* }* %28 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr + declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr +declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr + declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr +define internal fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) + ret void +} + declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr -declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr +define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { +entry: + call fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) + call fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) + ret void +} -define void @Feasibility__QubitMapping__Interop() local_unnamed_addr #0 { +declare %Tuple* @__quantum__rt__tuple_create(i64) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) 
unnamed_addr { +entry: + call void @__quantum__qis__h(%Qubit* %qubit) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) unnamed_addr { +entry: + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) + call fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) unnamed_addr { entry: - call fastcc void @Feasibility__QubitMapping__body() + call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) ret void } -define void @Feasibility__QubitMapping() local_unnamed_addr #1 { +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) local_unnamed_addr + +declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr + +declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr + +declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr + +define { i8, i8 }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { entry: - call fastcc void @Feasibility__QubitMapping__body() + %0 = call fastcc { %Result*, %Result* }* 
@TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 0 + %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 1 + %3 = load %Result*, %Result** %1, align 8 + %4 = load %Result*, %Result** %2, align 8 + %5 = call %Result* @__quantum__rt__result_get_zero() + %6 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %5) + %not. = xor i1 %6, true + %7 = sext i1 %not. to i8 + %8 = call %Result* @__quantum__rt__result_get_zero() + %9 = call i1 @__quantum__rt__result_equal(%Result* %4, %Result* %8) + %not.1 = xor i1 %9, true + %10 = sext i1 %not.1 to i8 + %11 = call i8* @__quantum__rt__memory_allocate(i64 2) + %12 = bitcast i8* %11 to { i8, i8 }* + store i8 %7, i8* %11, align 1 + %13 = getelementptr i8, i8* %11, i64 1 + store i8 %10, i8* %13, align 1 + call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) + %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret { i8, i8 }* %12 +} + +declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr + +declare i8* @__quantum__rt__memory_allocate(i64) local_unnamed_addr + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { +entry: + %0 = call fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @0, i64 0, i64 0)) + %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 0 + %3 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 1 + %4 = load %Result*, %Result** %2, align 8 + %5 = load %Result*, %Result** %3, align 
8 + %6 = call %String* @__quantum__rt__result_to_string(%Result* %4) + %7 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %6) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i64 0, i64 0)) + %9 = call %String* @__quantum__rt__string_concatenate(%String* %7, %String* %8) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %5) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i64 0, i64 0)) + %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__message(%String* %13) + call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i32 -1) + %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) ret void } +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) 
local_unnamed_addr + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) local_unnamed_addr + attributes #0 = { "InteropFriendly" } attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.ll b/src/Passes/examples/QubitAllocationAnalysis/test.ll index b4f2651a62..b35a587431 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/test.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/test.ll @@ -2,40 +2,236 @@ source_filename = "qir/ConstSizeArray.ll" %Qubit = type opaque +%Result = type opaque +%Tuple = type opaque %Array = type opaque +%String = type opaque -define internal fastcc void @Feasibility__QubitMapping__body() unnamed_addr { +@0 = internal constant [2 x i8] c"(\00" +@1 = internal constant [3 x i8] c", \00" +@2 = internal constant [2 x i8] c")\00" + +define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: - %qubit = inttoptr i64 0 to %Qubit* - call void @__quantum__qis__x__body(%Qubit* %qubit) - %qubit.1 = inttoptr i64 1 to %Qubit* - call void @__quantum__qis__x__body(%Qubit* %qubit.1) - %qubit.2 = inttoptr i64 2 to %Qubit* - call void @__quantum__qis__x__body(%Qubit* %qubit.2) + %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %3 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) + %4 = call %Result* @__quantum__rt__result_get_one() + %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) + call void 
@__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) + br i1 %5, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 ret void } +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__z(%Qubit* %qubit) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__x(%Qubit* %qubit) + ret void +} + +define internal fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +entry: + %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = inttoptr i64 0 to %Qubit* + %1 = inttoptr i64 2 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) + %2 = inttoptr i64 1 to %Qubit* + %3 = inttoptr i64 3 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) + %4 = inttoptr i64 0 to %Qubit* + %5 = inttoptr i64 2 to %Qubit* + call fastcc void 
@TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) + %6 = inttoptr i64 2 to %Qubit* + %7 = inttoptr i64 1 to %Qubit* + %8 = inttoptr i64 3 to %Qubit* + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 16) + %10 = bitcast %Tuple* %9 to { %Result*, %Result* }* + %11 = bitcast %Tuple* %9 to %Result** + %12 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %10, i64 0, i32 1 + %13 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) + %14 = inttoptr i64 3 to %Qubit* + %15 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %14) + store %Result* %13, %Result** %11, align 8 + store %Result* %15, %Result** %12, align 8 + call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) + call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) + ret { %Result*, %Result* }* %10 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr + declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr +declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr + declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr +define internal fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) + ret void +} + declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr -declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr +define internal fastcc void 
@TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { +entry: + call fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) + call fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) + ret void +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) local_unnamed_addr -define void @Feasibility__QubitMapping__Interop() local_unnamed_addr #0 { +define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { entry: - call fastcc void @Feasibility__QubitMapping__body() + call void @__quantum__qis__h(%Qubit* %qubit) ret void } -define void @Feasibility__QubitMapping() local_unnamed_addr #1 { +define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) unnamed_addr { entry: - call fastcc void @Feasibility__QubitMapping__body() + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) ret void } +define internal fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) + call fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) local_unnamed_addr + +declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr + +declare void 
@__quantum__qis__h(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr + +declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr + +define { i8, i8 }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +entry: + %0 = call fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 0 + %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 1 + %3 = load %Result*, %Result** %1, align 8 + %4 = load %Result*, %Result** %2, align 8 + %5 = call %Result* @__quantum__rt__result_get_zero() + %6 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %5) + %not. = xor i1 %6, true + %7 = sext i1 %not. 
to i8 + %8 = call %Result* @__quantum__rt__result_get_zero() + %9 = call i1 @__quantum__rt__result_equal(%Result* %4, %Result* %8) + %not.1 = xor i1 %9, true + %10 = sext i1 %not.1 to i8 + %11 = call i8* @__quantum__rt__memory_allocate(i64 2) + %12 = bitcast i8* %11 to { i8, i8 }* + store i8 %7, i8* %11, align 1 + %13 = getelementptr i8, i8* %11, i64 1 + store i8 %10, i8* %13, align 1 + call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) + %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret { i8, i8 }* %12 +} + +declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr + +declare i8* @__quantum__rt__memory_allocate(i64) local_unnamed_addr + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { +entry: + %0 = call fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @0, i64 0, i64 0)) + %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 0 + %3 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 1 + %4 = load %Result*, %Result** %2, align 8 + %5 = load %Result*, %Result** %3, align 8 + %6 = call %String* @__quantum__rt__result_to_string(%Result* %4) + %7 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %6) + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i64 0, i64 0)) + %9 = call %String* @__quantum__rt__string_concatenate(%String* %7, %String* %8) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %5) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + %12 = call %String* 
@__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i64 0, i64 0)) + %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) + call void @__quantum__rt__message(%String* %13) + call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i32 -1) + %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) local_unnamed_addr + attributes #0 = { "InteropFriendly" } attributes #1 = { "EntryPoint" } From 938757b6da68185f2dc9c0aba23ad7eb7439713b Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 10 Aug 2021 10:09:38 +0200 Subject: [PATCH 067/106] Fixing include --- src/Passes/Source/{LLvm => Llvm}/Llvm.hpp | 0 src/Passes/examples/QubitAllocationAnalysis/Makefile | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename src/Passes/Source/{LLvm => Llvm}/Llvm.hpp (100%) diff --git a/src/Passes/Source/LLvm/Llvm.hpp b/src/Passes/Source/Llvm/Llvm.hpp similarity index 100% rename from src/Passes/Source/LLvm/Llvm.hpp rename to src/Passes/Source/Llvm/Llvm.hpp diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index 11913ff58b..d6bd5f6635 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -8,7 +8,7 @@ run: build-qaa analysis-example.ll run-replace: build-ir analysis-example.ll # opt -loop-unroll -unroll-count=3 -unroll-allow-partial - opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-rotate,loop-unroll,transformation-rule" -S analysis-example.ll > test.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-unroll,transformation-rule" -S analysis-example.ll > test.ll opt --passes="inline" -S test.ll | opt -O1 -S From d3485df4dbe235f238dd281e2ad361939545a3df Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 10 Aug 2021 12:46:38 +0200 Subject: [PATCH 068/106] Updating replacements --- .../TransformationRule/TransformationRule.cpp | 23 ++- src/Passes/Source/Rules/ReplacementRule.cpp | 5 + src/Passes/Source/Rules/ReplacementRule.hpp | 16 ++- src/Passes/Source/Rules/RuleSet.cpp | 136 +++++++++++------- .../ConstSizeArray/ConstSizeArray.qs | 4 +- .../ConstSizeArray/qir/ConstSizeArray.ll | 125 ++++------------ .../analysis-example.ll | 91 ++---------- .../examples/QubitAllocationAnalysis/test.ll | 103 +++---------- 8 files changed, 178 insertions(+), 325 deletions(-) diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp index 3673a3f2cb..3e4cbfb115 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp @@ -29,25 +29,36 @@ llvm::PreservedAnalyses TransformationRulePass::run(llvm::Function &function, // Applying all replacements for (auto it = replacements_.rbegin(); it != replacements_.rend(); ++it) { + auto instr1 = llvm::dyn_cast(it->first); + if (instr1 == nullptr) + { + llvm::errs() << "; WARNING: cannot deal with non-instruction replacements\n"; + continue; + } + // Checking if we have a replacement for the instruction if (it->second != nullptr) { // ... if so, we just replace it, - llvm::ReplaceInstWithInst(it->first, it->second); + auto instr2 = llvm::dyn_cast(it->second); + if (instr2 == nullptr) + { + llvm::errs() << "; WARNING: cannot replace instruction with non-instruction\n"; + continue; + } + llvm::ReplaceInstWithInst(instr1, instr2); } else { // ... 
otherwise we delete the the instruction - auto instruction = it->first; - // Removing all uses - if (!instruction->use_empty()) + if (!instr1->use_empty()) { - instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); + instr1->replaceAllUsesWith(llvm::UndefValue::get(instr1->getType())); } // And finally we delete the instruction - instruction->eraseFromParent(); + instr1->eraseFromParent(); } } diff --git a/src/Passes/Source/Rules/ReplacementRule.cpp b/src/Passes/Source/Rules/ReplacementRule.cpp index c693440d0f..76d9c881bf 100644 --- a/src/Passes/Source/Rules/ReplacementRule.cpp +++ b/src/Passes/Source/Rules/ReplacementRule.cpp @@ -6,6 +6,11 @@ namespace microsoft { namespace quantum { +ReplacementRule::ReplacementRule(OperandPrototypePtr &&pattern, ReplaceFunction &&replacer) + : pattern_{std::move(pattern)} + , replacer_{std::move(replacer)} +{} + void ReplacementRule::setPattern(OperandPrototypePtr &&pattern) { pattern_ = std::move(pattern); diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index 0408ef5023..14a63b9202 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -18,9 +18,12 @@ class ReplacementRule using Value = llvm::Value; using OperandPrototypePtr = std::shared_ptr; using Builder = llvm::IRBuilder<>; - using Replacements = std::vector>; + using Replacements = std::vector>; using ReplaceFunction = std::function; + ReplacementRule() = default; + ReplacementRule(OperandPrototypePtr &&pattern, ReplaceFunction &&replacer); + /// Rule configuration /// @{ void setPattern(OperandPrototypePtr &&pattern); @@ -107,6 +110,17 @@ inline Capture operator""_cap(char const *name, std::size_t) return Capture(name); } +inline std::function +deleteInstruction() +{ + return [](ReplacementRule::Builder &, ReplacementRule::Value *val, ReplacementRule::Captures &, + ReplacementRule::Replacements &replacements) { + 
replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }; +} + } // namespace patterns } // namespace quantum diff --git a/src/Passes/Source/Rules/RuleSet.cpp b/src/Passes/Source/Rules/RuleSet.cpp index 1120ac529e..cde460e7ac 100644 --- a/src/Passes/Source/Rules/RuleSet.cpp +++ b/src/Passes/Source/Rules/RuleSet.cpp @@ -107,61 +107,41 @@ RuleSet::RuleSet() rule1b.setPattern(Call("__quantum__rt__qubit_allocate")); // Replacement details - rule1b.setReplacer([](Builder &, Value *, Captures &, Replacements &) { - // std::cout << "Found single allocation" << std::endl; - return false; - }); - rules_.emplace_back(std::move(rule1b)); + rule1b.setReplacer( + [alloc_manager](Builder &builder, Value *val, Captures &, Replacements &replacements) { + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } - // Rule 2 - delete __quantum__rt__array_update_alias_count - ReplacementRule rule2a; - auto alias_count1 = std::make_shared("__quantum__rt__array_update_alias_count"); - rule2a.setPattern(alias_count1); - rule2a.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - rules_.emplace_back(std::move(rule2a)); - - ReplacementRule rule2b; - auto alias_count2 = std::make_shared("__quantum__rt__string_update_alias_count"); - rule2b.setPattern(alias_count2); - rule2b.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - rules_.emplace_back(std::move(rule2b)); - - // Rule 3 - ReplacementRule rule3a; - auto reference_count1 = - std::make_shared("__quantum__rt__array_update_reference_count"); - rule3a.setPattern(reference_count1); - rule3a.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return 
true; - }); - rules_.emplace_back(std::move(rule3a)); - - ReplacementRule rule3b; - auto reference_count2 = - std::make_shared("__quantum__rt__string_update_reference_count"); - rule3b.setPattern(reference_count2); - rule3b.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - rules_.emplace_back(std::move(rule3b)); - - // Rule 4 - delete __quantum__rt__qubit_release_array - ReplacementRule rule4; - auto release_call = std::make_shared("__quantum__rt__qubit_release_array"); - rule4.setPattern(release_call); - rule4.setReplacer([](Builder &, Value *val, Captures &, Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - rules_.emplace_back(std::move(rule4)); + // Allocating qubit + alloc_manager->allocate(val->getName().str(), 1); + + // Computing the index by getting the current index value and offsetting by + // the offset at which the qubit array is allocated. + auto offset = alloc_manager->getOffset(val->getName().str()); + + // Creating a new index APInt that is shifted by the offset of the allocation + // TODO(tfr): Get the bitwidth size from somewhere + auto idx = llvm::APInt(64, offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the address space is in relation to the + // QIR. 
Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the instruction with the new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + return false; + }); + rules_.emplace_back(std::move(rule1b)); // Rule 6 - perform static allocation and delete __quantum__rt__qubit_allocate_array ReplacementRule rule6; @@ -209,7 +189,6 @@ RuleSet::RuleSet() rules_.emplace_back(std::move(rule8)); // Rule 10 - track stored values - auto get_target_element = Call("__quantum__rt__array_get_element_ptr_1d", "targetArrayName"_cap = _, "targetIndex"_cap = _); auto get_value_element = Call("__quantum__rt__array_get_element_ptr_1d", "valueArrayName"_cap = _, @@ -228,6 +207,53 @@ RuleSet::RuleSet() rules_.emplace_back(std::move(rule10)); + + // Measurements + auto replace_measurement = [](Builder &, Value *, Captures &, Replacements &) { + llvm::errs() << "Found measurement" + << "\n"; + + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + return false; + }; + + rules_.emplace_back(Call("__quantum__qis__m__body", "qubit"_cap = _), replace_measurement); + + // Quantum comparisons + auto get_one = Call("__quantum__rt__result_get_one"); + auto replace_one = [](Builder &, Value *, Captures &, Replacements &) { + llvm::errs() << "Found comparison" + << "\n"; + return false; + }; + + // Variations of get_one + rules_.emplace_back(Call("__quantum__rt__result_equal", "result"_cap = _, get_one), replace_one); + rules_.emplace_back(Call("__quantum__rt__result_equal", get_one, "result"_cap = _), replace_one); + + // Functions that we do not care about + rules_.emplace_back(Call("__quantum__rt__array_update_alias_count", _, _), deleteInstruction()); + rules_.emplace_back(Call("__quantum__rt__string_update_alias_count", _, _), deleteInstruction()); 
+ rules_.emplace_back(Call("__quantum__rt__result_update_alias_count", _, _), deleteInstruction()); + rules_.emplace_back(Call("__quantum__rt__array_update_reference_count", _, _), + deleteInstruction()); + rules_.emplace_back(Call("__quantum__rt__string_update_reference_count", _, _), + deleteInstruction()); + rules_.emplace_back(Call("__quantum__rt__result_update_reference_count", _, _), + deleteInstruction()); + + rules_.emplace_back(Call("__quantum__rt__qubit_release_array", _), deleteInstruction()); + rules_.emplace_back(Call("__quantum__rt__qubit_release", _), deleteInstruction()); + rules_.emplace_back(Call("__quantum__rt__string_create", _), deleteInstruction()); + rules_.emplace_back(Call("__quantum__rt__string_release", _), deleteInstruction()); + + rules_.emplace_back(Call("__quantum__rt__message", _), deleteInstruction()); } bool RuleSet::matchAndReplace(Instruction *value, Replacements &replacements) diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index 9d4e8f0927..ccfdf466c1 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -34,7 +34,7 @@ namespace TeleportChain { } @EntryPoint() - operation DemonstrateTeleportationUsingPresharedEntanglement() : (Result, Result) { + operation DemonstrateTeleportationUsingPresharedEntanglement() : Unit { let nPairs = 2; use (leftMessage, rightMessage, leftPreshared, rightPreshared) = (Qubit(), Qubit(), Qubit[nPairs], Qubit[nPairs]); PrepareEntangledPair(leftMessage, rightMessage); @@ -47,6 +47,6 @@ namespace TeleportChain { TeleportQubitUsingPresharedEntanglement(rightPreshared[i-1], leftPreshared[i], rightPreshared[i]); } - return (MResetZ(leftMessage), MResetZ(rightPreshared[nPairs-1])); + // return (MResetZ(leftMessage), MResetZ(rightPreshared[nPairs-1])); } } \ No newline 
at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll index a646404c8b..55239e8db1 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll @@ -23,9 +23,7 @@ @Microsoft__Quantum__Intrinsic__T = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__ctladj__wrapper] @Microsoft__Quantum__Intrinsic__X = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] @Microsoft__Quantum__Intrinsic__Z = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper] -@2 = internal constant [2 x i8] c"(\00" -@3 = internal constant [3 x i8] c", \00" -@4 = internal constant [2 x i8] c")\00" +@2 = internal constant [3 x i8] c"()\00" define internal void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) { entry: 
@@ -79,7 +77,7 @@ entry: ret void } -define internal { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() { +define internal void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() { entry: %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() @@ -143,24 +141,13 @@ exiting__2: ; preds = %body__2 br label %header__2 exit__2: ; preds = %header__2 - %26 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %27 = bitcast %Tuple* %26 to { %Result*, %Result* }* - %28 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %27, i32 0, i32 0 - %29 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %27, i32 0, i32 1 - %30 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) - %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %32 = bitcast i8* %31 to %Qubit** - %33 = load %Qubit*, %Qubit** %32, align 8 - %34 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %33) - store %Result* %30, %Result** %28, align 8 - store %Result* %34, %Result** %29, align 8 call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret { %Result*, %Result* }* %27 + ret void } declare %Qubit* @__quantum__rt__qubit_allocate() @@ -189,8 +176,6 @@ entry: ret void } -declare %Tuple* @__quantum__rt__tuple_create(i64) - define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* 
%qubit) { entry: call void @__quantum__qis__h(%Qubit* %qubit) @@ -281,7 +266,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %10, align 8 store %Qubit* %qubit, %Qubit** %11, align 8 - call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %7, { %Array*, %Qubit* }* %9) + call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %7, { %Array*, %Qubit* }* %9) call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -336,7 +321,7 @@ else__1: ; preds = %test1__1 store %Qubit* %target, %Qubit** %18, align 8 store %Array* %ctls, %Array** %13, align 8 store { %Qubit*, %Qubit* }* %16, { %Qubit*, %Qubit* }** %14, align 8 - call void @Microsoft__Quantum__Intrinsic___c1b23cd4538f4bf9ab69d9c3e574aa70___QsRef23__ApplyWithLessControlsA____body(%Callable* %10, { %Array*, { %Qubit*, %Qubit* }* }* %12) + call void @Microsoft__Quantum__Intrinsic___94a8d85ac915454e866ebb12115de6b8___QsRef23__ApplyWithLessControlsA____body(%Callable* %10, { %Array*, { %Qubit*, %Qubit* }* }* %12) call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -349,6 +334,8 @@ continue__1: ; preds = %else__1, %then1__1, ret void } +declare %Tuple* @__quantum__rt__tuple_create(i64) + declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) define internal void @TeleportChain__PrepareEntangledPair__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { @@ -1056,7 +1043,7 @@ else__1: ; preds = 
%test2__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %50, align 8 store %Qubit* %qubit, %Qubit** %51, align 8 - call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %47, { %Array*, %Qubit* }* %49) + call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %47, { %Array*, %Qubit* }* %49) call void @__quantum__rt__capture_update_reference_count(%Callable* %47, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %47, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -1148,7 +1135,7 @@ else__1: ; preds = %test2__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %17, align 8 store %Qubit* %qubit, %Qubit** %18, align 8 - call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %14, { %Array*, %Qubit* }* %16) + call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %14, { %Array*, %Qubit* }* %16) call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -1185,7 +1172,7 @@ entry: ret void } -define internal void @Microsoft__Quantum__Intrinsic___c1b23cd4538f4bf9ab69d9c3e574aa70___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___94a8d85ac915454e866ebb12115de6b8___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { entry: call void 
@__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -1376,7 +1363,7 @@ declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) -define internal void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, %Qubit* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, %Qubit* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -1793,7 +1780,7 @@ else__1: ; preds = %test1__1 store %Qubit* %qubit, %Qubit** %23, align 8 store %Array* %ctls, %Array** %18, align 8 store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 - call void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) + call void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -1859,7 +1846,7 @@ else__1: ; preds = %test1__1 store %Qubit* %qubit, %Qubit** %23, align 8 store %Array* %ctls, %Array** %18, align 8 store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 - call void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* 
}* %17) + call void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -1924,7 +1911,7 @@ else__1: ; preds = %test1__1 store %Qubit* %qubit, %Qubit** %24, align 8 store %Array* %ctls, %Array** %19, align 8 store { double, %Qubit* }* %22, { double, %Qubit* }** %20, align 8 - call void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____body(%Callable* %16, { %Array*, { double, %Qubit* }* }* %18) + call void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____body(%Callable* %16, { %Array*, { double, %Qubit* }* }* %18) call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2295,7 +2282,7 @@ declare double @llvm.pow.f64(double, double) #0 declare void @__quantum__qis__rx(double, %Qubit*) -define internal void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -2617,7 +2604,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, 
i32 1) store %Array* %ctls, %Array** %16, align 8 store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2713,7 +2700,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %16, align 8 store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2774,7 +2761,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %16, align 8 store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) call void 
@__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2870,7 +2857,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %16, align 8 store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2996,7 +2983,7 @@ declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) -define internal void @Microsoft__Quantum__Intrinsic___56e34d5893aa45ea981d41b8530e77c5___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, %Qubit* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, %Qubit* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -3139,7 +3126,7 @@ declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) -define internal void @Microsoft__Quantum__Intrinsic___574075b4ddd242719dc782bda73052ae___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { 
%Array*, { double, %Qubit* }* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -3283,7 +3270,7 @@ exit__2: ; preds = %header__2 ret void } -define internal void @Microsoft__Quantum__Intrinsic___c1b23cd4538f4bf9ab69d9c3e574aa70___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___94a8d85ac915454e866ebb12115de6b8___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -3427,77 +3414,25 @@ exit__2: ; preds = %header__2 ret void } -define { i8, i8 }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() #1 { -entry: - %0 = call { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %1 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i32 0, i32 1 - %3 = load %Result*, %Result** %1, align 8 - %4 = load %Result*, %Result** %2, align 8 - %5 = call %Result* @__quantum__rt__result_get_zero() - %6 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %5) - %7 = select i1 %6, i8 0, i8 -1 - %8 = call %Result* @__quantum__rt__result_get_zero() - %9 = call i1 @__quantum__rt__result_equal(%Result* %4, %Result* %8) - %10 = select i1 %9, i8 0, i8 -1 - %11 = call i8* @__quantum__rt__memory_allocate(i64 mul nuw (i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64), i64 2)) - 
%12 = bitcast i8* %11 to { i8, i8 }* - %13 = getelementptr { i8, i8 }, { i8, i8 }* %12, i64 0, i32 0 - store i8 %7, i8* %13, align 1 - %14 = getelementptr { i8, i8 }, { i8, i8 }* %12, i64 0, i32 1 - store i8 %10, i8* %14, align 1 - call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) - %15 = bitcast { %Result*, %Result* }* %0 to %Tuple* - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) - ret { i8, i8 }* %12 +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() #1 { +entry: + call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + ret void } -declare %Result* @__quantum__rt__result_get_zero() - -declare i8* @__quantum__rt__memory_allocate(i64) - define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() #2 { entry: - %0 = call { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i32 0, i32 0)) - %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i32 0, i32 0 - %3 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i32 0, i32 1 - %4 = load %Result*, %Result** %2, align 8 - %5 = load %Result*, %Result** %3, align 8 - %6 = call %String* @__quantum__rt__result_to_string(%Result* %4) - %7 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %6) - call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) - %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @3, i32 0, i32 0)) - %9 = call %String* @__quantum__rt__string_concatenate(%String* %7, %String* %8) - call void 
@__quantum__rt__string_update_reference_count(%String* %7, i32 -1) - %10 = call %String* @__quantum__rt__result_to_string(%Result* %5) - %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) - call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) - %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @4, i32 0, i32 0)) - %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) - call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) - call void @__quantum__rt__message(%String* %13) - call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %5, i32 -1) - %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @2, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) ret void } declare void @__quantum__rt__message(%String*) -declare %String* @__quantum__rt__result_to_string(%Result*) - declare void @__quantum__rt__string_update_reference_count(%String*, i32) -declare %String* @__quantum__rt__string_concatenate(%String*, %String*) - attributes #0 = { nounwind readnone speculatable willreturn } attributes #1 = { "InteropFriendly" } attributes #2 = { "EntryPoint" } diff --git 
a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index 36581a421e..bccae3a539 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -4,12 +4,9 @@ source_filename = "qir/ConstSizeArray.ll" %Qubit = type opaque %Result = type opaque %Array = type opaque -%Tuple = type opaque %String = type opaque -@0 = internal constant [2 x i8] c"(\00" -@1 = internal constant [3 x i8] c", \00" -@2 = internal constant [2 x i8] c")\00" +@0 = internal constant [3 x i8] c"()\00" define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: @@ -63,7 +60,7 @@ entry: ret void } -define internal fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { entry: %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() @@ -103,24 +100,13 @@ entry: %25 = bitcast i8* %24 to %Qubit** %26 = load %Qubit*, %Qubit** %25, align 8 call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) - %27 = call %Tuple* @__quantum__rt__tuple_create(i64 16) - %28 = bitcast %Tuple* %27 to { %Result*, %Result* }* - %29 = bitcast %Tuple* %27 to %Result** - %30 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %28, i64 0, i32 1 - %31 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) - %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %33 = bitcast i8* %32 to %Qubit** - %34 = load %Qubit*, %Qubit** %33, align 8 - %35 = call fastcc %Result* 
@Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %34) - store %Result* %31, %Result** %29, align 8 - store %Result* %35, %Result** %30, align 8 call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret { %Result*, %Result* }* %28 + ret void } declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr @@ -149,8 +135,6 @@ entry: ret void } -declare %Tuple* @__quantum__rt__tuple_create(i64) local_unnamed_addr - define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { entry: call void @__quantum__qis__h(%Qubit* %qubit) @@ -182,8 +166,6 @@ entry: ret void } -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) local_unnamed_addr - declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr @@ -198,77 +180,24 @@ declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr -define { i8, i8 }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { entry: - %0 = call fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %1 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 0 - %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 1 - %3 = load %Result*, %Result** %1, align 8 - %4 = load 
%Result*, %Result** %2, align 8 - %5 = call %Result* @__quantum__rt__result_get_zero() - %6 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %5) - %not. = xor i1 %6, true - %7 = sext i1 %not. to i8 - %8 = call %Result* @__quantum__rt__result_get_zero() - %9 = call i1 @__quantum__rt__result_equal(%Result* %4, %Result* %8) - %not.1 = xor i1 %9, true - %10 = sext i1 %not.1 to i8 - %11 = call i8* @__quantum__rt__memory_allocate(i64 2) - %12 = bitcast i8* %11 to { i8, i8 }* - store i8 %7, i8* %11, align 1 - %13 = getelementptr i8, i8* %11, i64 1 - store i8 %10, i8* %13, align 1 - call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) - %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) - ret { i8, i8 }* %12 + call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + ret void } -declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr - -declare i8* @__quantum__rt__memory_allocate(i64) local_unnamed_addr - define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { entry: - %0 = call fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @0, i64 0, i64 0)) - %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 0 - %3 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 1 - %4 = load %Result*, %Result** %2, align 8 - %5 = load %Result*, %Result** %3, align 8 - %6 = call %String* @__quantum__rt__result_to_string(%Result* %4) - %7 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %6) - call void @__quantum__rt__string_update_reference_count(%String* 
%1, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) - %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i64 0, i64 0)) - %9 = call %String* @__quantum__rt__string_concatenate(%String* %7, %String* %8) - call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) - %10 = call %String* @__quantum__rt__result_to_string(%Result* %5) - %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) - call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) - %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i64 0, i64 0)) - %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) - call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) - call void @__quantum__rt__message(%String* %13) - call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %5, i32 -1) - %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) ret void } declare void @__quantum__rt__message(%String*) local_unnamed_addr -declare %String* 
@__quantum__rt__result_to_string(%Result*) local_unnamed_addr - declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr -declare %String* @__quantum__rt__string_concatenate(%String*, %String*) local_unnamed_addr - attributes #0 = { "InteropFriendly" } attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.ll b/src/Passes/examples/QubitAllocationAnalysis/test.ll index b35a587431..66549dea16 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/test.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/test.ll @@ -3,20 +3,16 @@ source_filename = "qir/ConstSizeArray.ll" %Qubit = type opaque %Result = type opaque -%Tuple = type opaque %Array = type opaque %String = type opaque -@0 = internal constant [2 x i8] c"(\00" -@1 = internal constant [3 x i8] c", \00" -@2 = internal constant [2 x i8] c")\00" +@0 = internal constant [3 x i8] c"()\00" define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) %1 = call %Result* @__quantum__rt__result_get_one() %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) - call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) br i1 %2, label %then0__1, label %continue__1 then0__1: ; preds = %entry @@ -27,7 +23,6 @@ continue__1: ; preds = %then0__1, %entry %3 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) %4 = call %Result* @__quantum__rt__result_get_one() %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) - call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) br i1 %5, label %then0__2, label %continue__2 then0__2: ; preds = %continue__1 @@ -63,36 +58,25 @@ entry: ret void } -define internal fastcc { %Result*, %Result* }* 
@TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { entry: - %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %leftMessage = inttoptr i64 0 to %Qubit* + %rightMessage = inttoptr i64 1 to %Qubit* call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = inttoptr i64 0 to %Qubit* - %1 = inttoptr i64 2 to %Qubit* + %0 = inttoptr i64 2 to %Qubit* + %1 = inttoptr i64 4 to %Qubit* call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) - %2 = inttoptr i64 1 to %Qubit* - %3 = inttoptr i64 3 to %Qubit* + %2 = inttoptr i64 3 to %Qubit* + %3 = inttoptr i64 5 to %Qubit* call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) - %4 = inttoptr i64 0 to %Qubit* - %5 = inttoptr i64 2 to %Qubit* + %4 = inttoptr i64 2 to %Qubit* + %5 = inttoptr i64 4 to %Qubit* call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) - %6 = inttoptr i64 2 to %Qubit* - %7 = inttoptr i64 1 to %Qubit* - %8 = inttoptr i64 3 to %Qubit* + %6 = inttoptr i64 4 to %Qubit* + %7 = inttoptr i64 3 to %Qubit* + %8 = inttoptr i64 5 to %Qubit* call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 16) - %10 = bitcast %Tuple* %9 to { %Result*, %Result* }* - %11 = bitcast %Tuple* %9 to %Result** - %12 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %10, i64 0, i32 1 - %13 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) - %14 = inttoptr i64 3 to %Qubit* - %15 = call fastcc %Result* 
@Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %14) - store %Result* %13, %Result** %11, align 8 - store %Result* %15, %Result** %12, align 8 - call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) - call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) - ret { %Result*, %Result* }* %10 + ret void } declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr @@ -121,8 +105,6 @@ entry: ret void } -declare %Tuple* @__quantum__rt__tuple_create(i64) local_unnamed_addr - define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { entry: call void @__quantum__qis__h(%Qubit* %qubit) @@ -154,8 +136,6 @@ entry: ret void } -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) local_unnamed_addr - declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr @@ -170,68 +150,21 @@ declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr -define { i8, i8 }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { entry: - %0 = call fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %1 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 0 - %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 1 - %3 = load %Result*, %Result** %1, align 8 - %4 = load %Result*, %Result** %2, align 8 - %5 = call %Result* @__quantum__rt__result_get_zero() - %6 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %5) - %not. = xor i1 %6, true - %7 = sext i1 %not. 
to i8 - %8 = call %Result* @__quantum__rt__result_get_zero() - %9 = call i1 @__quantum__rt__result_equal(%Result* %4, %Result* %8) - %not.1 = xor i1 %9, true - %10 = sext i1 %not.1 to i8 - %11 = call i8* @__quantum__rt__memory_allocate(i64 2) - %12 = bitcast i8* %11 to { i8, i8 }* - store i8 %7, i8* %11, align 1 - %13 = getelementptr i8, i8* %11, i64 1 - store i8 %10, i8* %13, align 1 - call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) - %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) - ret { i8, i8 }* %12 + call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + ret void } -declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr - -declare i8* @__quantum__rt__memory_allocate(i64) local_unnamed_addr - define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { entry: - %0 = call fastcc { %Result*, %Result* }* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @0, i64 0, i64 0)) - %2 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 0 - %3 = getelementptr inbounds { %Result*, %Result* }, { %Result*, %Result* }* %0, i64 0, i32 1 - %4 = load %Result*, %Result** %2, align 8 - %5 = load %Result*, %Result** %3, align 8 - %6 = call %String* @__quantum__rt__result_to_string(%Result* %4) - %7 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %6) - %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i64 0, i64 0)) - %9 = call %String* @__quantum__rt__string_concatenate(%String* %7, %String* %8) - %10 = call %String* @__quantum__rt__result_to_string(%Result* %5) - %11 
= call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) - %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i64 0, i64 0)) - %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) - call void @__quantum__rt__message(%String* %13) - call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %5, i32 -1) - %14 = bitcast { %Result*, %Result* }* %0 to %Tuple* - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() ret void } declare void @__quantum__rt__message(%String*) local_unnamed_addr -declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr - declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr -declare %String* @__quantum__rt__string_concatenate(%String*, %String*) local_unnamed_addr - attributes #0 = { "InteropFriendly" } attributes #1 = { "EntryPoint" } From c94b5480bff419afc9f525fc0ae28eeb44f5ab7b Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 10 Aug 2021 14:53:34 +0200 Subject: [PATCH 069/106] Finalising positive branching --- .../AllocationManager/AllocationManager.cpp | 18 +- .../AllocationManager/AllocationManager.hpp | 8 +- .../TransformationRule/TransformationRule.cpp | 8 + src/Passes/Source/Rules/OperandPrototype.cpp | 1 + src/Passes/Source/Rules/OperandPrototype.hpp | 1 + src/Passes/Source/Rules/ReplacementRule.hpp | 12 + src/Passes/Source/Rules/RuleSet.cpp | 208 +++++++++++++----- .../examples/QubitAllocationAnalysis/test.ll | 19 +- 8 files changed, 197 insertions(+), 78 deletions(-) diff --git a/src/Passes/Source/AllocationManager/AllocationManager.cpp b/src/Passes/Source/AllocationManager/AllocationManager.cpp index 1d1bfe9c2f..8c5a998e11 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.cpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.cpp @@ -20,15 +20,13 @@ AllocationManager::AllocationManagerPtr AllocationManager::createNew() void AllocationManager::allocate(String const &name, Index const &size, bool value_only) { - // Creating an array to store values - // llvm::errs() << "Allocating " << name << " " << size << "\n"; - if (arrays_.find(name) != arrays_.end()) + if (resources_.find(name) != resources_.end()) { - throw std::runtime_error("Array with name " + name + " already exists."); + throw std::runtime_error("Resource with name " + name + " already exists."); } - arrays_[name].resize(size); - for (auto &v : arrays_[name]) + resources_[name].resize(size); + for (auto &v : resources_[name]) { v = nullptr; } @@ -57,12 +55,12 @@ void AllocationManager::allocate(String const &name, Index const &size, bool val } } -AllocationManager::Array &AllocationManager::get(String const &name) +AllocationManager::Resource &AllocationManager::get(String const &name) { - auto it = arrays_.find(name); - if (it == arrays_.end()) + auto it = resources_.find(name); + if (it == resources_.end()) { - throw std::runtime_error("Array with name " + name + " does 
not exists."); + throw std::runtime_error("Resource with name " + name + " does not exists."); } return it->second; } diff --git a/src/Passes/Source/AllocationManager/AllocationManager.hpp b/src/Passes/Source/AllocationManager/AllocationManager.hpp index 3a330588bb..55f968de66 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.hpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.hpp @@ -17,8 +17,8 @@ class AllocationManager using Index = uint64_t; using String = std::string; using AllocationManagerPtr = std::shared_ptr; - using Array = std::vector; - using Arrays = std::unordered_map; + using Resource = std::vector; + using Resources = std::unordered_map; struct MemoryMapping { @@ -37,7 +37,7 @@ class AllocationManager Index getOffset(String const &name) const; void release(String const &name); - Array &get(String const &name); + Resource &get(String const &name); private: AllocationManager() = default; @@ -45,7 +45,7 @@ class AllocationManager NameToIndex name_to_index_; Mappings mappings_; - Arrays arrays_; + Resources resources_; }; } // namespace quantum diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp index 3e4cbfb115..a801eb6db7 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp @@ -62,6 +62,14 @@ llvm::PreservedAnalyses TransformationRulePass::run(llvm::Function &function, } } + /* + for (auto &basic_block : function) + { + llvm::errs() << "REPLACEMENTS DONE FOR:\n"; + llvm::errs() << basic_block << "\n\n"; + } + */ + // If we did not change the IR, we report that we preserved all if (replacements_.empty()) { diff --git a/src/Passes/Source/Rules/OperandPrototype.cpp b/src/Passes/Source/Rules/OperandPrototype.cpp index 7c7653da6a..4ce85cc060 100644 --- a/src/Passes/Source/Rules/OperandPrototype.cpp +++ 
b/src/Passes/Source/Rules/OperandPrototype.cpp @@ -158,6 +158,7 @@ typename InstructionPattern::Child InstructionPattern::copy() const template class InstructionPattern; template class InstructionPattern; template class InstructionPattern; +template class InstructionPattern; #pragma clang diagnostic pop } // namespace quantum diff --git a/src/Passes/Source/Rules/OperandPrototype.hpp b/src/Passes/Source/Rules/OperandPrototype.hpp index 853b16d107..e437db6b2f 100644 --- a/src/Passes/Source/Rules/OperandPrototype.hpp +++ b/src/Passes/Source/Rules/OperandPrototype.hpp @@ -92,6 +92,7 @@ class InstructionPattern : public OperandPrototype using StorePattern = InstructionPattern; using LoadPattern = InstructionPattern; using BitCastPattern = InstructionPattern; +using BranchPattern = InstructionPattern; } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index 14a63b9202..657e132626 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -69,6 +69,18 @@ inline OperandPrototypePtr BitCast(OperandPrototypePtr arg) return static_cast(cast_pattern); } +inline OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, + OperandPrototypePtr arg2) +{ + auto branch_pattern = std::make_shared(); + + branch_pattern->addChild(cond); + branch_pattern->addChild(arg1); + branch_pattern->addChild(arg2); + + return static_cast(branch_pattern); +} + inline OperandPrototypePtr Load(OperandPrototypePtr arg) { auto ret = std::make_shared(); diff --git a/src/Passes/Source/Rules/RuleSet.cpp b/src/Passes/Source/Rules/RuleSet.cpp index cde460e7ac..f804b8e24f 100644 --- a/src/Passes/Source/Rules/RuleSet.cpp +++ b/src/Passes/Source/Rules/RuleSet.cpp @@ -17,7 +17,8 @@ RuleSet::RuleSet() // Shared pointer to be captured in the lambdas of the patterns // Note that you cannot capture this as the reference is destroyed upon // copy. 
Since PassInfoMixin requires copy, such a construct would break - auto alloc_manager = AllocationManager::createNew(); + auto qubit_alloc_manager = AllocationManager::createNew(); + auto result_alloc_manager = AllocationManager::createNew(); // Pattern 0 - Find type ReplacementRule rule0; @@ -25,7 +26,7 @@ RuleSet::RuleSet() auto get_element = Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); rule0.setPattern("cast"_cap = BitCast("getElement"_cap = get_element)); - rule0.setReplacer([alloc_manager](Builder &, Value *, Captures &cap, Replacements &) { + rule0.setReplacer([qubit_alloc_manager](Builder &, Value *, Captures &cap, Replacements &) { llvm::errs() << "Identified an access attempt" << "\n"; @@ -58,49 +59,49 @@ RuleSet::RuleSet() rule1a.setPattern(std::move(load_pattern)); // Replacement details - rule1a.setReplacer( - [alloc_manager](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { - // Getting the type pointer - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) - { - return false; - } + rule1a.setReplacer([qubit_alloc_manager](Builder &builder, Value *val, Captures &cap, + Replacements &replacements) { + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } - // Get the index and testing that it is a constant int - auto cst = llvm::dyn_cast(cap["index"]); - if (cst == nullptr) - { - // ... if not, we cannot perform the mapping. - return false; - } + // Get the index and testing that it is a constant int + auto cst = llvm::dyn_cast(cap["index"]); + if (cst == nullptr) + { + // ... if not, we cannot perform the mapping. + return false; + } - // Computing the index by getting the current index value and offseting by - // the offset at which the qubit array is allocated. 
- auto llvm_size = cst->getValue(); - auto offset = alloc_manager->getOffset(cap["arrayName"]->getName().str()); + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. + auto llvm_size = cst->getValue(); + auto offset = qubit_alloc_manager->getOffset(cap["arrayName"]->getName().str()); - // Creating a new index APInt that is shifted by the offset of the allocation - auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); + // Creating a new index APInt that is shifted by the offset of the allocation + auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. 
Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); - // Replacing the instruction with new instruction - replacements.push_back({llvm::dyn_cast(val), instr}); + // Replacing the instruction with new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); - // Deleting the getelement and cast operations - replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); - replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); + // Deleting the getelement and cast operations + replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); + replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); - return true; - }); + return true; + }); rules_.emplace_back(std::move(rule1a)); ReplacementRule rule1b; @@ -108,7 +109,7 @@ RuleSet::RuleSet() // Replacement details rule1b.setReplacer( - [alloc_manager](Builder &builder, Value *val, Captures &, Replacements &replacements) { + [qubit_alloc_manager](Builder &builder, Value *val, Captures &, Replacements &replacements) { // Getting the type pointer auto ptr_type = llvm::dyn_cast(val->getType()); if (ptr_type == nullptr) @@ -117,11 +118,11 @@ RuleSet::RuleSet() } // Allocating qubit - alloc_manager->allocate(val->getName().str(), 1); + qubit_alloc_manager->allocate(val->getName().str(), 1); // Computing the index by getting the current index value and offseting by // the offset at which the qubit array is allocated. 
- auto offset = alloc_manager->getOffset(val->getName().str()); + auto offset = qubit_alloc_manager->getOffset(val->getName().str()); // Creating a new index APInt that is shifted by the offset of the allocation // TODO(tfr): Get the bitwidth size from somewhere @@ -139,7 +140,7 @@ RuleSet::RuleSet() // Replacing the instruction with new instruction replacements.push_back({llvm::dyn_cast(val), instr}); - return false; + return true; }); rules_.emplace_back(std::move(rule1b)); @@ -149,7 +150,7 @@ RuleSet::RuleSet() rule6.setPattern(std::move(allocate_call)); rule6.setReplacer( - [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { + [qubit_alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { auto cst = llvm::dyn_cast(cap["size"]); if (cst == nullptr) { @@ -158,7 +159,7 @@ RuleSet::RuleSet() auto llvm_size = cst->getValue(); auto name = val->getName().str(); - alloc_manager->allocate(name, llvm_size.getZExtValue()); + qubit_alloc_manager->allocate(name, llvm_size.getZExtValue()); replacements.push_back({llvm::dyn_cast(val), nullptr}); return true; @@ -173,7 +174,7 @@ RuleSet::RuleSet() rule8.setPattern(std::move(allocate_array_call)); rule8.setReplacer( - [alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { + [qubit_alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { auto cst = llvm::dyn_cast(cap["size"]); if (cst == nullptr) { @@ -181,7 +182,7 @@ RuleSet::RuleSet() } auto llvm_size = cst->getValue(); - alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue(), true); + qubit_alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue(), true); replacements.push_back({llvm::dyn_cast(val), nullptr}); return true; }); @@ -201,7 +202,7 @@ RuleSet::RuleSet() ReplacementRule rule10; rule10.setPattern(std::move(store_pattern)); - rule10.setReplacer([alloc_manager](Builder &, Value *, Captures &, Replacements &) { + 
rule10.setReplacer([qubit_alloc_manager](Builder &, Value *, Captures &, Replacements &) { llvm::errs() << "Found store pattern" << "\n"; return false; @@ -209,10 +210,8 @@ RuleSet::RuleSet() rules_.emplace_back(std::move(rule10)); // Measurements - auto replace_measurement = [](Builder &, Value *, Captures &, Replacements &) { - llvm::errs() << "Found measurement" - << "\n"; - + auto replace_measurement = [result_alloc_manager](Builder &builder, Value *val, Captures &cap, + Replacements &replacements) { // Getting the type pointer auto ptr_type = llvm::dyn_cast(val->getType()); if (ptr_type == nullptr) @@ -220,22 +219,119 @@ RuleSet::RuleSet() return false; } - return false; + // Allocating qubit + result_alloc_manager->allocate(val->getName().str(), 1); + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. + auto offset = result_alloc_manager->getOffset(val->getName().str()); + + // Creating a new index APInt that is shifted by the offset of the allocation + // TODO(tfr): Get the bitwidth size from somewhere + auto idx = llvm::APInt(64, offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. 
Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + auto module = llvm::dyn_cast(val)->getModule(); + auto function = module->getFunction("__quantum__qis__mz__body"); + + std::vector arguments; + arguments.push_back(cap["qubit"]); + arguments.push_back(instr); + + if (!function) + { + std::vector types; + for (auto &arg : arguments) + { + types.push_back(arg->getType()); + } + + auto return_type = llvm::Type::getVoidTy(val->getContext()); + + llvm::FunctionType *fnc_type = llvm::FunctionType::get(return_type, types, false); + function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, + "__quantum__qis__mz__body", module); + } + + // Ensuring we are inserting after the instruction being deleted + builder.SetInsertPoint(llvm::dyn_cast(val)->getNextNode()); + + builder.CreateCall(function, arguments); + + // Replacing the instruction with new instruction + // TODO: (tfr): insert instruction before and then replace, with new call + replacements.push_back({llvm::dyn_cast(val), instr}); + + return true; }; rules_.emplace_back(Call("__quantum__qis__m__body", "qubit"_cap = _), replace_measurement); // Quantum comparisons - auto get_one = Call("__quantum__rt__result_get_one"); - auto replace_one = [](Builder &, Value *, Captures &, Replacements &) { - llvm::errs() << "Found comparison" + auto get_one = Call("__quantum__rt__result_get_one"); + auto replace_branch_positive = [](Builder &builder, Value *val, Captures &cap, + Replacements &replacements) { + llvm::errs() << "Found branch" << "\n"; + + auto result = cap["result"]; + auto cond = llvm::dyn_cast(cap["cond"]); + + // Replacing result + auto module = llvm::dyn_cast(val)->getModule(); + auto function = module->getFunction("__quantum__qir__read_result"); + std::vector arguments; + arguments.push_back(result); + + if (!function) + { + std::vector types; + for (auto &arg : 
arguments) + { + types.push_back(arg->getType()); + } + + auto return_type = llvm::Type::getInt1Ty(val->getContext()); + + llvm::FunctionType *fnc_type = llvm::FunctionType::get(return_type, types, false); + function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, + "__quantum__qir__read_result", module); + } + + builder.SetInsertPoint(llvm::dyn_cast(result)->getNextNode()); + auto new_call = builder.CreateCall(function, arguments); + new_call->takeName(cond); + + for (auto &use : cond->uses()) + { + llvm::User *user = use.getUser(); + user->setOperand(use.getOperandNo(), new_call); + } + + // Deleting the previous condition and function to fetch one + replacements.push_back({cond, nullptr}); + replacements.push_back({cap["one"], nullptr}); + return false; }; // Variations of get_one - rules_.emplace_back(Call("__quantum__rt__result_equal", "result"_cap = _, get_one), replace_one); - rules_.emplace_back(Call("__quantum__rt__result_equal", get_one, "result"_cap = _), replace_one); + rules_.emplace_back(Branch("cond"_cap = Call("__quantum__rt__result_equal", "result"_cap = _, + "one"_cap = get_one), + _, _), + replace_branch_positive); + rules_.emplace_back(Branch("cond"_cap = Call("__quantum__rt__result_equal", "one"_cap = get_one, + "result"_cap = _), + _, _), + replace_branch_positive); // Functions that we do not care about rules_.emplace_back(Call("__quantum__rt__array_update_alias_count", _, _), deleteInstruction()); diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.ll b/src/Passes/examples/QubitAllocationAnalysis/test.ll index 66549dea16..cedc987e1f 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/test.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/test.ll @@ -11,19 +11,17 @@ source_filename = "qir/ConstSizeArray.ll" define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: %0 = call fastcc %Result* 
@Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) - %1 = call %Result* @__quantum__rt__result_get_one() - %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) - br i1 %2, label %then0__1, label %continue__1 + %1 = call i1 @__quantum__qir__read_result(%Result* %0) + br i1 %1, label %then0__1, label %continue__1 then0__1: ; preds = %entry call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) br label %continue__1 continue__1: ; preds = %then0__1, %entry - %3 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) - %4 = call %Result* @__quantum__rt__result_get_one() - %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) - br i1 %5, label %then0__2, label %continue__2 + %2 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) + %3 = call i1 @__quantum__qir__read_result(%Result* %2) + br i1 %3, label %then0__2, label %continue__2 then0__2: ; preds = %continue__1 call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) @@ -35,7 +33,8 @@ continue__2: ; preds = %then0__2, %continue define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + %result = inttoptr i64 0 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) call void @__quantum__qis__reset__body(%Qubit* %target) ret %Result* %result } @@ -166,5 +165,9 @@ declare void @__quantum__rt__message(%String*) local_unnamed_addr declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr +declare i1 @__quantum__qir__read_result(%Result*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + attributes #0 = { "InteropFriendly" } attributes #1 = { "EntryPoint" } From 75f0326cbebe8cea6dafeb630ca8bdf806d52354 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 10 Aug 2021 15:54:59 +0200 Subject: [PATCH 070/106] Rewriting qubit use analysis --- .../QubitAllocationAnalysis.cpp | 300 ++---------------- .../QubitAllocationAnalysis.hpp | 44 +-- src/Passes/Source/Rules/RuleSet.cpp | 3 +- 3 files changed, 37 insertions(+), 310 deletions(-) diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 02262ad21d..04a1e332ec 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -12,250 +12,45 @@ namespace microsoft { namespace quantum { -bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const &instruction) const -{ - // Default is true (i.e. the case of no operands) - bool ret = true; - - // Checking that all oprands are constant - for (auto &op : instruction.operands()) - { - - // An operand is constant if its value was previously generated from - // a const expression ... - auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); - - // ... or if it is just a compile time constant. Note that we - // delibrately only consider integers. We may expand this - // to other constants once we have function support. - auto cst = llvm::dyn_cast(op); - auto is_constant = (cst != nullptr); - - ret = ret && (const_arg || is_constant); - } - - return ret; -} - -void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction &instruction) +QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( + llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) { - // Creating arg dependencies - ArgList all_dependencies{}; - for (auto &op : instruction.operands()) + for (auto &basic_block : function) { - // If the operand has dependecies ... 
- auto it = constantness_dependencies_.find(op); - if (it != constantness_dependencies_.end()) + for (auto &instr : basic_block) { - // ... we add these as a dependency for the - // resulting instructions value - for (auto &arg : it->second) + auto call_instr = llvm::dyn_cast(&instr); + if (call_instr == nullptr) { - all_dependencies.insert(arg); + continue; } - } - } - - // Adding full list of dependices to the dependency graph - constantness_dependencies_.insert({&instruction, all_dependencies}); -} - -void QubitAllocationAnalysisAnalytics::analyseCall(Instruction &instruction) -{ - // Skipping debug code - /* - TODO(tfr): Enable this in LLVM 12 and upwards - if (instruction.isDebugOrPseudoInst()) - { - return; - } - */ - - // Recovering the call information - auto *call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - return; - } - - // Getting the name of the function being called - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); - - // TODO(tfr): Make use of TargetLibraryInfo - if (name != "__quantum__rt__qubit_allocate_array") - { - return; - } - - // We expect only a single argument with the number - // of qubits allocated - if (call_instr->arg_size() != 1) - { - llvm::errs() << "Expected exactly one argument\n"; - return; - } - - // Next we extract the argument ... - auto argument = call_instr->getArgOperand(0); - if (argument == nullptr) - { - llvm::errs() << "Failed getting the size argument\n"; - return; - } - - // ... 
and checks whether it is a result of a dependant - // const expression - auto it = constantness_dependencies_.find(argument); - if (it != constantness_dependencies_.end()) - { - // If it is, we add the details to the result list - QubitArray qubit_array; - qubit_array.is_possibly_static = true; - qubit_array.variable_name = instruction.getName().str(); - qubit_array.depends_on = it->second; + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + // llvm::errs() << "Testing " << name << " : " << *call_instr << "\n"; - // Pushing to the result - results_.push_back(std::move(qubit_array)); - return; - } - - // Otherwise, it may be a static allocation based on a constant (or - // folded constant) - auto cst = llvm::dyn_cast(argument); - if (cst != nullptr) - { - QubitArray qubit_array; - qubit_array.is_possibly_static = true; - qubit_array.variable_name = instruction.getName().str(); - qubit_array.size = cst->getZExtValue(); - - // Pushing to the result - results_.push_back(std::move(qubit_array)); - - return; - } - - // If neither of the previous is the case, we are dealing with a non-static array - QubitArray qubit_array; - qubit_array.is_possibly_static = false; - qubit_array.variable_name = instruction.getName().str(); - - // Storing the result - results_.push_back(std::move(qubit_array)); -} + if (name == "__quantum__rt__qubit_allocate") + { + return {true}; + } -void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function &function) -{ - // Clearing results generated in a previous run - results_.clear(); - constantness_dependencies_.clear(); + if (name == "__quantum__rt__qubit_allocate_array") + { + return {true}; + } - // Creating a list with function arguments - for (auto &arg : function.args()) - { - auto s = arg.getName().str(); - constantness_dependencies_.insert({&arg, {s}}); - } + if (name == "__quantum__qis__m__body") + { + return {true}; + } - // Evaluating all expressions - for (auto &basic_block : 
function) - { - for (auto &instruction : basic_block) - { - auto opcode = instruction.getOpcode(); - switch (opcode) + if (name == "__quantum__qis__z__body") { - case llvm::Instruction::Sub: - case llvm::Instruction::Add: - case llvm::Instruction::Mul: - case llvm::Instruction::Shl: - case llvm::Instruction::LShr: - case llvm::Instruction::AShr: - case llvm::Instruction::And: - case llvm::Instruction::Or: - case llvm::Instruction::Xor: - if (operandsConstant(instruction)) - { - markPossibleConstant(instruction); - } - break; - case llvm::Instruction::Call: - analyseCall(instruction); - break; - // Unanalysed statements - case llvm::Instruction::Ret: - case llvm::Instruction::Br: - case llvm::Instruction::Switch: - case llvm::Instruction::IndirectBr: - case llvm::Instruction::Invoke: - case llvm::Instruction::Resume: - case llvm::Instruction::Unreachable: - case llvm::Instruction::CleanupRet: - case llvm::Instruction::CatchRet: - case llvm::Instruction::CatchSwitch: - case llvm::Instruction::CallBr: - case llvm::Instruction::FNeg: - case llvm::Instruction::FAdd: - case llvm::Instruction::FSub: - case llvm::Instruction::FMul: - case llvm::Instruction::UDiv: - case llvm::Instruction::SDiv: - case llvm::Instruction::FDiv: - case llvm::Instruction::URem: - case llvm::Instruction::SRem: - case llvm::Instruction::FRem: - case llvm::Instruction::Alloca: - case llvm::Instruction::Load: - case llvm::Instruction::Store: - case llvm::Instruction::GetElementPtr: - case llvm::Instruction::Fence: - case llvm::Instruction::AtomicCmpXchg: - case llvm::Instruction::AtomicRMW: - case llvm::Instruction::Trunc: - case llvm::Instruction::ZExt: - case llvm::Instruction::SExt: - case llvm::Instruction::FPToUI: - case llvm::Instruction::FPToSI: - case llvm::Instruction::UIToFP: - case llvm::Instruction::SIToFP: - case llvm::Instruction::FPTrunc: - case llvm::Instruction::FPExt: - case llvm::Instruction::PtrToInt: - case llvm::Instruction::IntToPtr: - case llvm::Instruction::BitCast: - case 
llvm::Instruction::AddrSpaceCast: - case llvm::Instruction::CleanupPad: - case llvm::Instruction::CatchPad: - case llvm::Instruction::ICmp: - case llvm::Instruction::FCmp: - case llvm::Instruction::PHI: - case llvm::Instruction::Select: - case llvm::Instruction::UserOp1: - case llvm::Instruction::UserOp2: - case llvm::Instruction::VAArg: - case llvm::Instruction::ExtractElement: - case llvm::Instruction::InsertElement: - case llvm::Instruction::ShuffleVector: - case llvm::Instruction::ExtractValue: - case llvm::Instruction::InsertValue: - case llvm::Instruction::LandingPad: - // End of Binary Ops - default: - break; + return {true}; } } } -} -QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( - llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) -{ - // Running functin analysis - analyseFunction(function); - - // ... and return the result. - return results_; + return {false}; } QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream) @@ -265,47 +60,16 @@ QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & function, llvm::FunctionAnalysisManager &fam) { - auto &results = fam.getResult(function); + auto &result = fam.getResult(function); - if (!results.empty()) + if (result.value) { - out_stream_ << function.getName() << "\n"; - out_stream_ << "====================" - << "\n\n"; - for (auto const &ret : results) - { - if (!ret.is_possibly_static) - { - out_stream_ << ret.variable_name << " is dynamic.\n"; - } - else - { - if (ret.depends_on.empty()) - { - out_stream_ << ret.variable_name << " is trivially static with " << ret.size - << " qubits."; - } - else - { - out_stream_ << ret.variable_name << " depends on "; - bool first = true; - for (auto &x : ret.depends_on) - { - if (!first) - { - out_stream_ << ", "; - } - out_stream_ << x; - first = false; - } - out_stream_ << " being 
constant to be static."; - } - } - - out_stream_ << "\n"; - } + out_stream_ << function.getName() << " contains quantum allocations.\n"; + } + else + { + out_stream_ << function.getName() << " is logic only.\n"; } - return llvm::PreservedAnalyses::all(); } diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index 68de055bed..3b8289bf83 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp +++ b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -14,29 +14,13 @@ class QubitAllocationAnalysisAnalytics : public llvm::AnalysisInfoMixin { public: - using String = std::string; - using ArgList = std::unordered_set; + using String = std::string; - struct QubitArray + struct Result { - bool is_possibly_static{false}; ///< Indicates whether the array is - /// possibly static or not - /// - String variable_name{}; ///< Name of the qubit array - ArgList depends_on{}; ///< Function arguments that - /// determines if it is constant or not - /// - uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. 
+ bool value{false}; }; - using Value = llvm::Value; - using DependencyGraph = std::unordered_map; - using ValueDependencyGraph = std::unordered_map; - - using Instruction = llvm::Instruction; - using Function = llvm::Function; - using Result = std::vector; - /// Constructors and destructors /// @{ QubitAllocationAnalysisAnalytics() = default; @@ -56,31 +40,9 @@ class QubitAllocationAnalysisAnalytics Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); /// @} - /// Function analysis - /// @{ - void analyseFunction(llvm::Function &function); - /// @} - - /// Instruction analysis - /// @{ - bool operandsConstant(Instruction const &instruction) const; - void markPossibleConstant(Instruction &instruction); - void analyseCall(Instruction &instruction); - /// @} - private: static llvm::AnalysisKey Key; // NOLINT friend struct llvm::AnalysisInfoMixin; - - /// Analysis details - /// @{ - ValueDependencyGraph constantness_dependencies_{}; - /// @} - - /// Result - /// @{ - Result results_{}; - /// @} }; class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin diff --git a/src/Passes/Source/Rules/RuleSet.cpp b/src/Passes/Source/Rules/RuleSet.cpp index f804b8e24f..fdaf0be95c 100644 --- a/src/Passes/Source/Rules/RuleSet.cpp +++ b/src/Passes/Source/Rules/RuleSet.cpp @@ -168,6 +168,7 @@ RuleSet::RuleSet() rules_.emplace_back(std::move(rule6)); // Rule 8 - standard array allocation + /* ReplacementRule rule8; auto allocate_array_call = Call("__quantum__rt__array_create_1d", "elementSize"_cap = _, "size"_cap = _); @@ -188,7 +189,7 @@ RuleSet::RuleSet() }); rules_.emplace_back(std::move(rule8)); - +*/ // Rule 10 - track stored values auto get_target_element = Call("__quantum__rt__array_get_element_ptr_1d", "targetArrayName"_cap = _, "targetIndex"_cap = _); From c865fcd11c17a14c146a25d5a2b904d33b026837 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 10 Aug 2021 20:52:35 +0200 Subject: [PATCH 071/106] Updates to make last bit a transformation work --- .../AllocationManager/AllocationManager.cpp | 28 +- .../AllocationManager/AllocationManager.hpp | 3 + .../ExpandStaticAllocation.cpp | 141 +++---- .../ExpandStaticAllocation.hpp | 3 +- .../TransformationRule/TransformationRule.cpp | 11 +- src/Passes/Source/Rules/RuleSet.cpp | 35 +- .../ConstSizeArray/ConstSizeArray.qs | 6 +- .../ConstSizeArray/qir/ConstSizeArray.ll | 41 +- .../examples/QubitAllocationAnalysis/Makefile | 8 +- .../analysis-example.ll | 7 + .../examples/QubitAllocationAnalysis/test.ll | 173 -------- .../examples/QubitAllocationAnalysis/test1.ll | 378 ++++++++++++++++++ .../examples/QubitAllocationAnalysis/test2.ll | 176 ++++++++ 13 files changed, 692 insertions(+), 318 deletions(-) create mode 100644 src/Passes/examples/QubitAllocationAnalysis/test1.ll create mode 100644 src/Passes/examples/QubitAllocationAnalysis/test2.ll diff --git a/src/Passes/Source/AllocationManager/AllocationManager.cpp b/src/Passes/Source/AllocationManager/AllocationManager.cpp index 8c5a998e11..d6019ed6cf 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.cpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.cpp @@ -18,6 +18,13 @@ AllocationManager::AllocationManagerPtr AllocationManager::createNew() return ret; } +AllocationManager::Index AllocationManager::allocate() +{ + auto ret = start_; + ++start_; + return ret; +} + void AllocationManager::allocate(String const &name, Index const &size, bool value_only) { if (resources_.find(name) != resources_.end()) @@ -47,7 +54,8 @@ void AllocationManager::allocate(String const &name, Index const &size, bool val name_to_index_[map.name] = map.index; if (!mappings_.empty()) { - map.start = mappings_.back().end; + map.start = start_; + start_ += size; } map.end = map.start + size; @@ -77,8 +85,22 @@ AllocationManager::Index AllocationManager::getOffset(String const &name) const return 
mappings_[index].start; } -void AllocationManager::release(String const & /*name*/) -{} +void AllocationManager::release(String const &name) +{ + auto it = name_to_index_.find(name); + if (it == name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + name + " not found."); + } + name_to_index_.erase(it); + + auto it2 = resources_.find(name); + if (it2 == resources_.end()) + { + throw std::runtime_error("Resource with name " + name + " does not exists."); + } + resources_.erase(it2); +} } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/AllocationManager/AllocationManager.hpp b/src/Passes/Source/AllocationManager/AllocationManager.hpp index 55f968de66..d4b1df39f4 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.hpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.hpp @@ -33,6 +33,7 @@ class AllocationManager static AllocationManagerPtr createNew(); + Index allocate(); void allocate(String const &name, Index const &size, bool value_only = false); Index getOffset(String const &name) const; void release(String const &name); @@ -46,6 +47,8 @@ class AllocationManager Mappings mappings_; Resources resources_; + + Index start_{0}; }; } // namespace quantum diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp index 7c913b76b5..a4b4b484e5 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -32,9 +32,9 @@ llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & std::vector remaining_arguments{}; auto callee_function = call_instr->getCalledFunction(); - auto &depenency_graph = fam.getResult(*callee_function); + auto &use_quantum = fam.getResult(*callee_function); - if (depenency_graph.size() > 0) + if (use_quantum.value) { uint32_t idx = 0; auto n = 
static_cast(callee_function->arg_size()); @@ -59,13 +59,12 @@ llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & } // Checking which arrays are constant for this - auto new_callee = expandFunctionCall(depenency_graph, *callee_function, argument_constants); + auto new_callee = expandFunctionCall(*callee_function, argument_constants); // Replacing call if a new function was created if (new_callee != nullptr) { llvm::IRBuilder<> builder(call_instr); - (void)call_instr; // List with new call arguments std::vector new_arguments; @@ -83,13 +82,10 @@ llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & // Creating a new call llvm::Value *new_call = builder.CreateCall(new_callee, new_arguments); + new_call->takeName(call_instr); // Replace all calls to old function with calls to new function - for (auto &use : call_instr->uses()) - { - llvm::User *user = use.getUser(); - user->setOperand(use.getOperandNo(), new_call); - } + instruction.replaceAllUsesWith(new_call); // Schedule original instruction for deletion to_remove.push_back(&instruction); @@ -111,107 +107,64 @@ llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & return llvm::PreservedAnalyses::none(); } -llvm::Function *ExpandStaticAllocationPass::expandFunctionCall( - QubitAllocationResult const &depenency_graph, llvm::Function &callee, - ConstantArguments const &const_args) +llvm::Function *ExpandStaticAllocationPass::expandFunctionCall(llvm::Function & callee, + ConstantArguments const &const_args) { - bool should_replace_function = false; - if (!depenency_graph.empty()) + auto module = callee.getParent(); + auto & context = module->getContext(); + llvm::IRBuilder<> builder(context); + + // Copying the original function + llvm::ValueToValueMapTy remapper; + std::vector arg_types; + + // The user might be deleting arguments to the function by specifying them in + // the VMap. 
If so, we need to not add the arguments to the arg ty vector + // + for (auto const &arg : callee.args()) { - // Checking that any of all allocations in the function - // body becomes static from replacing constant function arguments - for (auto const &allocation : depenency_graph) + // Skipping constant arguments + + if (const_args.find(arg.getName().str()) != const_args.end()) { - // Ignoring non-static allocations - if (!allocation.is_possibly_static) - { - continue; - } + continue; + } - // Ignoring trivial allocations - if (allocation.depends_on.empty()) - { - continue; - } + arg_types.push_back(arg.getType()); + } - // Checking all dependencies are constant - bool all_const = true; - for (auto &name : allocation.depends_on) - { - all_const = all_const && (const_args.find(name) != const_args.end()); - } + // Creating a new function + llvm::FunctionType *function_type = llvm::FunctionType::get( + callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); + auto function = llvm::Function::Create(function_type, callee.getLinkage(), + callee.getAddressSpace(), callee.getName(), module); - // In case that all dependencies are constant for this - // allocation, we should replace the function with one where - // the arguments are eliminated. - if (all_const) - { - should_replace_function = true; - } - } - } + // Copying the non-const arguments + auto dest_args_it = function->arg_begin(); - // Replacing function if needed - if (should_replace_function) + for (auto const &arg : callee.args()) { - auto module = callee.getParent(); - auto & context = module->getContext(); - llvm::IRBuilder<> builder(context); - - // Copying the original function - llvm::ValueToValueMapTy remapper; - std::vector arg_types; - - // The user might be deleting arguments to the function by specifying them in - // the VMap. 
If so, we need to not add the arguments to the arg ty vector - // - for (auto const &arg : callee.args()) + auto const_it = const_args.find(arg.getName().str()); + if (const_it == const_args.end()) { - // Skipping constant arguments - - if (const_args.find(arg.getName().str()) != const_args.end()) - { - continue; - } - - arg_types.push_back(arg.getType()); + // Mapping remaining function arguments + dest_args_it->setName(arg.getName()); + remapper[&arg] = &*dest_args_it++; } - - // Creating a new function - llvm::FunctionType *function_type = llvm::FunctionType::get( - callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); - auto function = llvm::Function::Create(function_type, callee.getLinkage(), - callee.getAddressSpace(), callee.getName(), module); - - // Copying the non-const arguments - auto dest_args_it = function->arg_begin(); - - for (auto const &arg : callee.args()) + else { - auto const_it = const_args.find(arg.getName().str()); - if (const_it == const_args.end()) - { - // Mapping remaining function arguments - dest_args_it->setName(arg.getName()); - remapper[&arg] = &*dest_args_it++; - } - else - { - remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); - } + remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); } + } - llvm::SmallVector returns; // Ignore returns cloned. - - // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' - llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); + llvm::SmallVector returns; // Ignore returns cloned. 
- verifyFunction(*function); + // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' + llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); - return function; - } + verifyFunction(*function); - return nullptr; + return function; } bool ExpandStaticAllocationPass::isRequired() diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp index 8803026292..b4d34e98a1 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -37,8 +37,7 @@ class ExpandStaticAllocationPass : public llvm::PassInfoMixingetType(); // This rule only deals with access to arrays of opaque types @@ -62,6 +59,7 @@ RuleSet::RuleSet() rule1a.setReplacer([qubit_alloc_manager](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); if (ptr_type == nullptr) { @@ -117,12 +115,9 @@ RuleSet::RuleSet() return false; } - // Allocating qubit - qubit_alloc_manager->allocate(val->getName().str(), 1); - // Computing the index by getting the current index value and offseting by // the offset at which the qubit array is allocated. - auto offset = qubit_alloc_manager->getOffset(val->getName().str()); + auto offset = qubit_alloc_manager->allocate(); // Creating a new index APInt that is shifted by the offset of the allocation // TODO(tfr): Get the bitwidth size from somewhere @@ -220,12 +215,9 @@ RuleSet::RuleSet() return false; } - // Allocating qubit - result_alloc_manager->allocate(val->getName().str(), 1); - // Computing the index by getting the current index value and offseting by // the offset at which the qubit array is allocated. 
- auto offset = result_alloc_manager->getOffset(val->getName().str()); + auto offset = result_alloc_manager->allocate(); // Creating a new index APInt that is shifted by the offset of the allocation // TODO(tfr): Get the bitwidth size from somewhere @@ -280,12 +272,8 @@ RuleSet::RuleSet() auto get_one = Call("__quantum__rt__result_get_one"); auto replace_branch_positive = [](Builder &builder, Value *val, Captures &cap, Replacements &replacements) { - llvm::errs() << "Found branch" - << "\n"; - auto result = cap["result"]; auto cond = llvm::dyn_cast(cap["cond"]); - // Replacing result auto module = llvm::dyn_cast(val)->getModule(); auto function = module->getFunction("__quantum__qir__read_result"); @@ -306,8 +294,9 @@ RuleSet::RuleSet() function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, "__quantum__qir__read_result", module); } + auto result_inst = llvm::dyn_cast(result); - builder.SetInsertPoint(llvm::dyn_cast(result)->getNextNode()); + builder.SetInsertPoint(result_inst->getNextNode()); auto new_call = builder.CreateCall(function, arguments); new_call->takeName(cond); @@ -316,6 +305,7 @@ RuleSet::RuleSet() llvm::User *user = use.getUser(); user->setOperand(use.getOperandNo(), new_call); } + cond->replaceAllUsesWith(new_call); // Deleting the previous condition and function to fetch one replacements.push_back({cond, nullptr}); @@ -334,6 +324,17 @@ RuleSet::RuleSet() _, _), replace_branch_positive); + auto deleter = deleteInstruction(); + rules_.emplace_back(Call("__quantum__rt__qubit_release_array", "name"_cap = _), + [qubit_alloc_manager, deleter](Builder &builder, Value *val, Captures &cap, + Replacements &rep) { + qubit_alloc_manager->release(cap["name"]->getName().str()); + return deleter(builder, val, cap, rep); + } + + ); + rules_.emplace_back(Call("__quantum__rt__qubit_release", _), deleteInstruction()); + // Functions that we do not care about rules_.emplace_back(Call("__quantum__rt__array_update_alias_count", _, _), 
deleteInstruction()); rules_.emplace_back(Call("__quantum__rt__string_update_alias_count", _, _), deleteInstruction()); @@ -345,8 +346,6 @@ RuleSet::RuleSet() rules_.emplace_back(Call("__quantum__rt__result_update_reference_count", _, _), deleteInstruction()); - rules_.emplace_back(Call("__quantum__rt__qubit_release_array", _), deleteInstruction()); - rules_.emplace_back(Call("__quantum__rt__qubit_release", _), deleteInstruction()); rules_.emplace_back(Call("__quantum__rt__string_create", _), deleteInstruction()); rules_.emplace_back(Call("__quantum__rt__string_release", _), deleteInstruction()); diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index ccfdf466c1..f0bc733273 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -46,7 +46,9 @@ namespace TeleportChain { for i in 1..nPairs-1 { TeleportQubitUsingPresharedEntanglement(rightPreshared[i-1], leftPreshared[i], rightPreshared[i]); } - - // return (MResetZ(leftMessage), MResetZ(rightPreshared[nPairs-1])); + + let _ = MResetZ(leftMessage); + let _ = MResetZ(rightPreshared[nPairs-1]); + // return (); } } \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll index 55239e8db1..7dbd00d86e 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll @@ -141,8 +141,15 @@ exiting__2: ; preds = %body__2 br label %header__2 exit__2: ; preds = %header__2 + %26 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%rightPreshared, i64 1) + %28 = bitcast i8* %27 to %Qubit** + %29 = load %Qubit*, %Qubit** %28, align 8 + %30 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %29) call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %26, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %30, i32 -1) call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) @@ -266,7 +273,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %10, align 8 store %Qubit* %qubit, %Qubit** %11, align 8 - call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %7, { %Array*, %Qubit* }* %9) + call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %7, { %Array*, %Qubit* }* %9) call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -321,7 +328,7 @@ else__1: ; preds = %test1__1 store %Qubit* %target, %Qubit** %18, align 8 store %Array* %ctls, %Array** %13, align 8 store { %Qubit*, %Qubit* }* %16, { %Qubit*, %Qubit* }** %14, align 8 - call void @Microsoft__Quantum__Intrinsic___94a8d85ac915454e866ebb12115de6b8___QsRef23__ApplyWithLessControlsA____body(%Callable* %10, { %Array*, { %Qubit*, %Qubit* }* }* %12) + call void @Microsoft__Quantum__Intrinsic___27e64f0afee94ef4bf9523108ce47367___QsRef23__ApplyWithLessControlsA____body(%Callable* 
%10, { %Array*, { %Qubit*, %Qubit* }* }* %12) call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -1043,7 +1050,7 @@ else__1: ; preds = %test2__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %50, align 8 store %Qubit* %qubit, %Qubit** %51, align 8 - call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %47, { %Array*, %Qubit* }* %49) + call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %47, { %Array*, %Qubit* }* %49) call void @__quantum__rt__capture_update_reference_count(%Callable* %47, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %47, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -1135,7 +1142,7 @@ else__1: ; preds = %test2__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %17, align 8 store %Qubit* %qubit, %Qubit** %18, align 8 - call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %14, { %Array*, %Qubit* }* %16) + call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %14, { %Array*, %Qubit* }* %16) call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -1172,7 +1179,7 @@ entry: ret void } -define internal void 
@Microsoft__Quantum__Intrinsic___94a8d85ac915454e866ebb12115de6b8___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___27e64f0afee94ef4bf9523108ce47367___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -1363,7 +1370,7 @@ declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) -define internal void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, %Qubit* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, %Qubit* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -1780,7 +1787,7 @@ else__1: ; preds = %test1__1 store %Qubit* %qubit, %Qubit** %23, align 8 store %Array* %ctls, %Array** %18, align 8 store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 - call void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) + call void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ 
-1846,7 +1853,7 @@ else__1: ; preds = %test1__1 store %Qubit* %qubit, %Qubit** %23, align 8 store %Array* %ctls, %Array** %18, align 8 store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 - call void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) + call void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -1911,7 +1918,7 @@ else__1: ; preds = %test1__1 store %Qubit* %qubit, %Qubit** %24, align 8 store %Array* %ctls, %Array** %19, align 8 store { double, %Qubit* }* %22, { double, %Qubit* }** %20, align 8 - call void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____body(%Callable* %16, { %Array*, { double, %Qubit* }* }* %18) + call void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____body(%Callable* %16, { %Array*, { double, %Qubit* }* }* %18) call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2282,7 +2289,7 @@ declare double @llvm.pow.f64(double, double) #0 declare void @__quantum__qis__rx(double, %Qubit*) -define internal void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { +define internal void 
@Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -2604,7 +2611,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %16, align 8 store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2700,7 +2707,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %16, align 8 store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2761,7 +2768,7 @@ else__1: ; preds = %test1__1 call void 
@__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %16, align 8 store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2857,7 +2864,7 @@ else__1: ; preds = %test1__1 call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) store %Array* %ctls, %Array** %16, align 8 store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) @@ -2983,7 +2990,7 @@ declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) -define internal void @Microsoft__Quantum__Intrinsic___d0c5e5bdde45429e90c2a44a56eb8a85___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, %Qubit* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { 
%Array*, %Qubit* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -3126,7 +3133,7 @@ declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) -define internal void @Microsoft__Quantum__Intrinsic___9ebafa51892848d598fa08bc663d8d45___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) @@ -3270,7 +3277,7 @@ exit__2: ; preds = %header__2 ret void } -define internal void @Microsoft__Quantum__Intrinsic___94a8d85ac915454e866ebb12115de6b8___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { +define internal void @Microsoft__Quantum__Intrinsic___27e64f0afee94ef4bf9523108ce47367___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { entry: call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index d6bd5f6635..7815e611e2 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -6,10 +6,12 @@ run-expand: build-qaa build-esa analysis-example.ll run: build-qaa analysis-example.ll opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll -run-replace: 
build-ir analysis-example.ll +run-replace: build-ir build-qaa build-esa analysis-example.ll # opt -loop-unroll -unroll-count=3 -unroll-allow-partial - opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-unroll,transformation-rule" -S analysis-example.ll > test.ll - opt --passes="inline" -S test.ll | opt -O1 -S +# opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ +# -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll > test1.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-unroll,transformation-rule" -S analysis-example.ll > test2.ll + opt --passes="inline" -S test2.ll | opt -O1 -S build-prepare: diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index bccae3a539..01079efdd2 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -100,8 +100,15 @@ entry: %25 = bitcast i8* %24 to %Qubit** %26 = load %Qubit*, %Qubit** %25, align 8 call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) + %27 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %30 = load %Qubit*, %Qubit** %29, align 8 + %31 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %30) call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) + call void 
@__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.ll b/src/Passes/examples/QubitAllocationAnalysis/test.ll index cedc987e1f..e69de29bb2 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/test.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/test.ll @@ -1,173 +0,0 @@ -; ModuleID = 'analysis-example.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Qubit = type opaque -%Result = type opaque -%Array = type opaque -%String = type opaque - -@0 = internal constant [3 x i8] c"()\00" - -define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) - %1 = call i1 @__quantum__qir__read_result(%Result* %0) - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - %2 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) - %3 = call i1 @__quantum__qir__read_result(%Result* %2) - br i1 %3, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) - br label %continue__2 - -continue__2: ; preds = %then0__2, %continue__1 - ret void -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 0 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - 
call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr - -declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr - -define internal fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__z(%Qubit* %qubit) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__x(%Qubit* %qubit) - ret void -} - -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { -entry: - %leftMessage = inttoptr i64 0 to %Qubit* - %rightMessage = inttoptr i64 1 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = inttoptr i64 2 to %Qubit* - %1 = inttoptr i64 4 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) - %2 = inttoptr i64 3 to %Qubit* - %3 = inttoptr i64 5 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) - %4 = inttoptr i64 2 to %Qubit* - %5 = inttoptr i64 4 to %Qubit* - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) - %6 = inttoptr i64 4 to %Qubit* - %7 = inttoptr i64 3 to %Qubit* - %8 = inttoptr i64 5 to %Qubit* - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) - ret void -} - -declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr - -declare void 
@__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -define internal fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) - ret void -} - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr - -define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - call fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) - call fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__h(%Qubit* %qubit) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) unnamed_addr { -entry: - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) - call fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) unnamed_addr { -entry: - call fastcc 
void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - ret void -} - -declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr - -declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr - -declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr - -declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - ret void -} - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { -entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -declare i1 @__quantum__qir__read_result(%Result*) - -declare void @__quantum__qis__mz__body(%Qubit*, %Result*) - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/test1.ll b/src/Passes/examples/QubitAllocationAnalysis/test1.ll new file mode 100644 index 0000000000..ac6d18ca6f --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/test1.ll @@ -0,0 +1,378 @@ +; ModuleID = 'analysis-example.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Qubit = type opaque +%Result = type opaque +%Array = type opaque +%String = type opaque + +@0 = internal constant [3 x i8] c"()\00" + +define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { +entry: + %0 = 
call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.1(%Qubit* %src) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %3 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.2(%Qubit* %intermediary) + %4 = call %Result* @__quantum__rt__result_get_one() + %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) + call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) + br i1 %5, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + ret void +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__z(%Qubit* %qubit) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__x(%Qubit* %qubit) + ret void +} + +define internal fastcc void 
@TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +entry: + %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) + %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %4 = bitcast i8* %3 to %Qubit** + %5 = load %Qubit*, %Qubit** %4, align 8 + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %5) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10, align 8 + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %8, %Qubit* %11) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %14, %Qubit* %17) + %18 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %19 = bitcast i8* %18 to %Qubit** + %20 = load %Qubit*, %Qubit** %19, align 8 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) + %22 = bitcast i8* %21 to %Qubit** + %23 = load %Qubit*, %Qubit** %22, align 8 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %25 = bitcast i8* %24 to %Qubit** + %26 = load %Qubit*, %Qubit** %25, align 8 + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) + %27 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3(%Qubit* %leftMessage) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %30 = load %Qubit*, %Qubit** %29, align 8 + %31 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4(%Qubit* %30) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) + call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) + call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) + call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +define internal 
fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) + ret void +} + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr + +define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { +entry: + call fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) + call fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__h(%Qubit* %qubit) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) unnamed_addr { +entry: + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) + call fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr + +declare void 
@__quantum__qis__h(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr + +declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +entry: + call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.5() + ret void +} + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { +entry: + call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.6() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.1(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.2(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + 
call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.5() unnamed_addr { +entry: + %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) + %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %4 = bitcast i8* %3 to %Qubit** + %5 = load %Qubit*, %Qubit** %4, align 8 + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %5) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10, align 8 + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %8, %Qubit* %11) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %13 = bitcast i8* %12 to 
%Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %14, %Qubit* %17) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %19 = bitcast i8* %18 to %Qubit** + %20 = load %Qubit*, %Qubit** %19, align 8 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) + %22 = bitcast i8* %21 to %Qubit** + %23 = load %Qubit*, %Qubit** %22, align 8 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %25 = bitcast i8* %24 to %Qubit** + %26 = load %Qubit*, %Qubit** %25, align 8 + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) + %27 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.7(%Qubit* %leftMessage) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %30 = load %Qubit*, %Qubit** %29, align 8 + %31 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.8(%Qubit* %30) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) + call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) + call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) + call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) + ret void +} + +define internal fastcc void 
@TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.6() unnamed_addr { +entry: + %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) + %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %4 = bitcast i8* %3 to %Qubit** + %5 = load %Qubit*, %Qubit** %4, align 8 + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %5) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10, align 8 + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %8, %Qubit* %11) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %14, %Qubit* %17) + %18 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %19 = bitcast i8* %18 to %Qubit** + %20 = load %Qubit*, %Qubit** %19, align 8 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) + %22 = bitcast i8* %21 to %Qubit** + %23 = load %Qubit*, %Qubit** %22, align 8 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %25 = bitcast i8* %24 to %Qubit** + %26 = load %Qubit*, %Qubit** %25, align 8 + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) + %27 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.9(%Qubit* %leftMessage) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %30 = load %Qubit*, %Qubit** %29, align 8 + %31 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.10(%Qubit* %30) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) + call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) + call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) + call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) + ret void +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.7(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.8(%Qubit* %target) unnamed_addr { +entry: + 
%result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.9(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.10(%Qubit* %target) unnamed_addr { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/test2.ll b/src/Passes/examples/QubitAllocationAnalysis/test2.ll new file mode 100644 index 0000000000..906107066b --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/test2.ll @@ -0,0 +1,176 @@ +; ModuleID = 'analysis-example.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Qubit = type opaque +%Result = type opaque +%Array = type opaque +%String = type opaque + +@0 = internal constant [3 x i8] c"()\00" + +define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { +entry: + %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) + %1 = call i1 @__quantum__qir__read_result(%Result* %0) + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %2 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) + %3 = call i1 @__quantum__qir__read_result(%Result* %2) + br i1 %3, label %then0__2, label %continue__2 + +then0__2: ; preds = 
%continue__1 + call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + ret void +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 0 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__z(%Qubit* %qubit) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__x(%Qubit* %qubit) + ret void +} + +define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +entry: + %leftMessage = inttoptr i64 0 to %Qubit* + %rightMessage = inttoptr i64 1 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = inttoptr i64 0 to %Qubit* + %1 = inttoptr i64 2 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) + %2 = inttoptr i64 1 to %Qubit* + %3 = inttoptr i64 3 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) + %4 = inttoptr i64 0 to %Qubit* + %5 = inttoptr i64 2 to %Qubit* + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) + %6 = inttoptr i64 2 to %Qubit* + %7 = inttoptr i64 1 to %Qubit* + %8 = inttoptr i64 3 to 
%Qubit* + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) + %9 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) + %10 = inttoptr i64 3 to %Qubit* + %11 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %10) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +define internal fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) + ret void +} + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr + +define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { +entry: + call fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) + call fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { +entry: + call void @__quantum__qis__h(%Qubit* %qubit) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) unnamed_addr { +entry: + call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal fastcc void 
@TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) + call fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) unnamed_addr { +entry: + call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr + +declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr + +declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + ret void +} + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { +entry: + call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +declare i1 @__quantum__qir__read_result(%Result*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +attributes #0 = { "InteropFriendly" } +attributes #1 = { 
"EntryPoint" } From 0f56169f0b674b53a16dacb88422b08e3f9a1ebc Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 11 Aug 2021 09:54:37 +0200 Subject: [PATCH 072/106] Migrating ruleset to shared ptr --- src/Passes/Source/Rules/Factory.cpp | 19 +++++ src/Passes/Source/Rules/Factory.hpp | 26 ++++++ src/Passes/Source/Rules/ReplacementRule.hpp | 3 + src/Passes/Source/Rules/RuleSet.cpp | 89 ++++++++++++--------- src/Passes/Source/Rules/RuleSet.hpp | 9 ++- 5 files changed, 105 insertions(+), 41 deletions(-) create mode 100644 src/Passes/Source/Rules/Factory.cpp create mode 100644 src/Passes/Source/Rules/Factory.hpp diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp new file mode 100644 index 0000000000..972bdb73ab --- /dev/null +++ b/src/Passes/Source/Rules/Factory.cpp @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Rules/Factory.hpp" + +#include "Llvm/Llvm.hpp" + +namespace microsoft { +namespace quantum { +using ReplacementRulePtr = RuleFactory::ReplacementRulePtr; + +ReplacementRulePtr RuleFactory::removeFunctionCall(String const &name) +{ + (void)name; + return nullptr; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Factory.hpp b/src/Passes/Source/Rules/Factory.hpp new file mode 100644 index 0000000000..54e4d4abed --- /dev/null +++ b/src/Passes/Source/Rules/Factory.hpp @@ -0,0 +1,26 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm/Llvm.hpp" +#include "Rules/ReplacementRule.hpp" + +#include + +namespace microsoft { +namespace quantum { + +struct RuleFactory +{ + using String = std::string; + using ReplacementRulePtr = std::shared_ptr; + + /// Single rules + + static ReplacementRulePtr removeFunctionCall(String const &name); + + /// +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index 657e132626..264777e997 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -21,8 +21,11 @@ class ReplacementRule using Replacements = std::vector>; using ReplaceFunction = std::function; + /// Constructorss and destructors + /// @{ ReplacementRule() = default; ReplacementRule(OperandPrototypePtr &&pattern, ReplaceFunction &&replacer); + /// @} /// Rule configuration /// @{ diff --git a/src/Passes/Source/Rules/RuleSet.cpp b/src/Passes/Source/Rules/RuleSet.cpp index 63efcf09d3..e2683d59fa 100644 --- a/src/Passes/Source/Rules/RuleSet.cpp +++ b/src/Passes/Source/Rules/RuleSet.cpp @@ -42,7 +42,7 @@ RuleSet::RuleSet() << *type->getPointerElementType() << " " << type->isArrayTy() << "\n"; return false; }); - rules_.emplace_back(std::move(rule0)); + rules_.emplace_back(std::make_shared(std::move(rule0))); // Pattern 1 - Get array index @@ -100,7 +100,7 @@ RuleSet::RuleSet() return true; }); - rules_.emplace_back(std::move(rule1a)); + rules_.emplace_back(std::make_shared(std::move(rule1a))); ReplacementRule rule1b; rule1b.setPattern(Call("__quantum__rt__qubit_allocate")); @@ -137,7 +137,7 @@ RuleSet::RuleSet() return true; }); - rules_.emplace_back(std::move(rule1b)); + rules_.emplace_back(std::make_shared(std::move(rule1b))); // Rule 6 - perform static allocation and delete __quantum__rt__qubit_allocate_array ReplacementRule rule6; @@ -160,7 +160,7 @@ RuleSet::RuleSet() return true; }); - rules_.emplace_back(std::move(rule6)); + 
rules_.emplace_back(std::make_shared(std::move(rule6))); // Rule 8 - standard array allocation /* @@ -203,7 +203,7 @@ RuleSet::RuleSet() << "\n"; return false; }); - rules_.emplace_back(std::move(rule10)); + rules_.emplace_back(std::make_shared(std::move(rule10))); // Measurements auto replace_measurement = [result_alloc_manager](Builder &builder, Value *val, Captures &cap, @@ -266,7 +266,8 @@ RuleSet::RuleSet() return true; }; - rules_.emplace_back(Call("__quantum__qis__m__body", "qubit"_cap = _), replace_measurement); + rules_.emplace_back(std::make_shared( + Call("__quantum__qis__m__body", "qubit"_cap = _), replace_measurement)); // Quantum comparisons auto get_one = Call("__quantum__rt__result_get_one"); @@ -315,41 +316,53 @@ RuleSet::RuleSet() }; // Variations of get_one - rules_.emplace_back(Branch("cond"_cap = Call("__quantum__rt__result_equal", "result"_cap = _, - "one"_cap = get_one), - _, _), - replace_branch_positive); - rules_.emplace_back(Branch("cond"_cap = Call("__quantum__rt__result_equal", "one"_cap = get_one, - "result"_cap = _), - _, _), - replace_branch_positive); + rules_.emplace_back(std::make_shared( + Branch( + "cond"_cap = Call("__quantum__rt__result_equal", "result"_cap = _, "one"_cap = get_one), + _, _), + replace_branch_positive)); + + rules_.emplace_back(std::make_shared( + Branch( + "cond"_cap = Call("__quantum__rt__result_equal", "one"_cap = get_one, "result"_cap = _), + _, _), + replace_branch_positive)); auto deleter = deleteInstruction(); - rules_.emplace_back(Call("__quantum__rt__qubit_release_array", "name"_cap = _), - [qubit_alloc_manager, deleter](Builder &builder, Value *val, Captures &cap, - Replacements &rep) { - qubit_alloc_manager->release(cap["name"]->getName().str()); - return deleter(builder, val, cap, rep); - } + rules_.emplace_back(std::make_shared( + Call("__quantum__rt__qubit_release_array", "name"_cap = _), + [qubit_alloc_manager, deleter](Builder &builder, Value *val, Captures &cap, + Replacements &rep) { + 
qubit_alloc_manager->release(cap["name"]->getName().str()); + return deleter(builder, val, cap, rep); + } + + )); - ); - rules_.emplace_back(Call("__quantum__rt__qubit_release", _), deleteInstruction()); + rules_.emplace_back(std::make_shared(Call("__quantum__rt__qubit_release", _), + deleteInstruction())); // Functions that we do not care about - rules_.emplace_back(Call("__quantum__rt__array_update_alias_count", _, _), deleteInstruction()); - rules_.emplace_back(Call("__quantum__rt__string_update_alias_count", _, _), deleteInstruction()); - rules_.emplace_back(Call("__quantum__rt__result_update_alias_count", _, _), deleteInstruction()); - rules_.emplace_back(Call("__quantum__rt__array_update_reference_count", _, _), - deleteInstruction()); - rules_.emplace_back(Call("__quantum__rt__string_update_reference_count", _, _), - deleteInstruction()); - rules_.emplace_back(Call("__quantum__rt__result_update_reference_count", _, _), - deleteInstruction()); - - rules_.emplace_back(Call("__quantum__rt__string_create", _), deleteInstruction()); - rules_.emplace_back(Call("__quantum__rt__string_release", _), deleteInstruction()); - - rules_.emplace_back(Call("__quantum__rt__message", _), deleteInstruction()); + rules_.emplace_back(std::make_shared( + Call("__quantum__rt__array_update_alias_count", _, _), deleteInstruction())); + rules_.emplace_back(std::make_shared( + Call("__quantum__rt__string_update_alias_count", _, _), deleteInstruction())); + rules_.emplace_back(std::make_shared( + Call("__quantum__rt__result_update_alias_count", _, _), deleteInstruction())); + rules_.emplace_back(std::make_shared( + Call("__quantum__rt__array_update_reference_count", _, _), deleteInstruction())); + rules_.emplace_back(std::make_shared( + Call("__quantum__rt__string_update_reference_count", _, _), deleteInstruction())); + rules_.emplace_back(std::make_shared( + Call("__quantum__rt__result_update_reference_count", _, _), deleteInstruction())); + + 
rules_.emplace_back(std::make_shared(Call("__quantum__rt__string_create", _), + deleteInstruction())); + rules_.emplace_back(std::make_shared(Call("__quantum__rt__string_release", _), + deleteInstruction())); + + rules_.emplace_back( + std::make_shared(Call("__quantum__rt__message", _), deleteInstruction())); } bool RuleSet::matchAndReplace(Instruction *value, Replacements &replacements) @@ -358,12 +371,12 @@ bool RuleSet::matchAndReplace(Instruction *value, Replacements &replacements) for (auto const &rule : rules_) { // Checking if the rule is matched and keep track of captured nodes - if (rule.match(value, captures)) + if (rule->match(value, captures)) { // If it is matched, we attempt to replace it llvm::IRBuilder<> builder{value}; - if (rule.replace(builder, value, captures, replacements)) + if (rule->replace(builder, value, captures, replacements)) { return true; } diff --git a/src/Passes/Source/Rules/RuleSet.hpp b/src/Passes/Source/Rules/RuleSet.hpp index 7eeec28de6..7f92c17be9 100644 --- a/src/Passes/Source/Rules/RuleSet.hpp +++ b/src/Passes/Source/Rules/RuleSet.hpp @@ -5,6 +5,7 @@ #include "Rules/OperandPrototype.hpp" #include "Rules/ReplacementRule.hpp" +#include #include namespace microsoft { @@ -13,7 +14,8 @@ namespace quantum { class RuleSet { public: - using Rules = std::vector; + using ReplacementRulePtr = std::shared_ptr; + using Rules = std::vector; using Replacements = ReplacementRule::Replacements; using Captures = OperandPrototype::Captures; using Instruction = llvm::Instruction; @@ -23,15 +25,16 @@ class RuleSet /// @{ RuleSet(); - RuleSet(RuleSet const &) = delete; + RuleSet(RuleSet const &) = default; RuleSet(RuleSet &&) = default; ~RuleSet() = default; /// @} /// Operators /// @{ - RuleSet &operator=(RuleSet const &) = delete; + RuleSet &operator=(RuleSet const &) = default; RuleSet &operator=(RuleSet &&) = default; + // TODO(tfr): add RuleSet operator&(RuleSet const &other); /// @} bool matchAndReplace(Instruction *value, Replacements 
&replacements); From f1c295d294a02aea4af6ae0a404daf898ed5b9a3 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 11 Aug 2021 13:34:43 +0200 Subject: [PATCH 073/106] Refactoring how to define a QIR profile transformation --- .../LibTransformationRule.cpp | 22 +- .../TransformationRule/TransformationRule.cpp | 3 + .../TransformationRule/TransformationRule.hpp | 2 +- src/Passes/Source/Rules/Factory.cpp | 310 ++++++++++++++- src/Passes/Source/Rules/Factory.hpp | 53 ++- src/Passes/Source/Rules/ReplacementRule.hpp | 6 + src/Passes/Source/Rules/RuleSet.cpp | 362 +----------------- src/Passes/Source/Rules/RuleSet.hpp | 4 +- .../examples/QubitAllocationAnalysis/Makefile | 6 +- .../examples/QubitAllocationAnalysis/test2.ll | 126 +++++- 10 files changed, 516 insertions(+), 378 deletions(-) diff --git a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp index 0841f84081..31a53ccfca 100644 --- a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp @@ -3,6 +3,7 @@ #include "Llvm/Llvm.hpp" #include "Passes/TransformationRule/TransformationRule.hpp" +#include "Rules/Factory.hpp" #include #include @@ -18,9 +19,26 @@ llvm::PassPluginLibraryInfo getTransformationRulePluginInfo() // Registering the pass pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { - if (name == "transformation-rule") + // Base profile + if (name == "restrict-qir") { - fpm.addPass(TransformationRulePass()); + RuleSet rule_set; + + // Defining the mapping + auto factory = RuleFactory(rule_set); + + factory.useStaticQuantumArrayAllocation(); + factory.useStaticQuantumAllocation(); + factory.useStaticResultAllocation(); + + factory.optimiseBranchQuatumOne(); + // factory.optimiseBranchQuatumZero(); + + factory.disableReferenceCounting(); + factory.disableAliasCounting(); + 
factory.disableStringSupport(); + + fpm.addPass(TransformationRulePass(std::move(rule_set))); return true; } diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp index 2c3ed36aa6..574cade548 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp @@ -10,6 +10,9 @@ namespace microsoft { namespace quantum { +TransformationRulePass::TransformationRulePass(RuleSet &&rule_set) + : rule_set_{std::move(rule_set)} +{} llvm::PreservedAnalyses TransformationRulePass::run(llvm::Function &function, llvm::FunctionAnalysisManager & /*fam*/) diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp index 969820899b..e1e57db3f4 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp @@ -22,7 +22,7 @@ class TransformationRulePass : public llvm::PassInfoMixin(cap["size"]); + if (cst == nullptr) + { + return false; + } + + auto llvm_size = cst->getValue(); + auto name = val->getName().str(); + qubit_alloc_manager->allocate(name, llvm_size.getZExtValue()); + + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }; + addRule({Call("__quantum__rt__qubit_allocate_array", "size"_cap = _), allocation_replacer}); + + /// Array access replacement + auto access_replacer = [qubit_alloc_manager](Builder &builder, Value *val, Captures &cap, + Replacements &replacements) { + // Getting the type pointer + + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + // Get the index and testing that it is a constant int + auto cst = llvm::dyn_cast(cap["index"]); + if (cst == nullptr) + { + // ... if not, we cannot perform the mapping. 
+ return false; + } + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. + auto llvm_size = cst->getValue(); + auto offset = qubit_alloc_manager->getOffset(cap["arrayName"]->getName().str()); + + // Creating a new index APInt that is shifted by the offset of the allocation + auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the instruction with new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + // Deleting the getelement and cast operations + replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); + replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); + + return true; + }; + + auto get_element = + Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); + auto cast_pattern = BitCast("getElement"_cap = get_element); + auto load_pattern = Load("cast"_cap = cast_pattern); + + addRule({std::move(load_pattern), access_replacer}); + + /// Release replacement + auto deleter = deleteInstruction(); + addRule({Call("__quantum__rt__qubit_release_array", "name"_cap = _), + [qubit_alloc_manager, deleter](Builder &builder, Value *val, Captures &cap, + Replacements &rep) { + qubit_alloc_manager->release(cap["name"]->getName().str()); + return deleter(builder, val, cap, rep); + } + + }); +} + +void RuleFactory::useStaticQuantumAllocation() +{ + auto qubit_alloc_manager = qubit_alloc_manager_; + auto allocation_replacer = [qubit_alloc_manager](Builder &builder, Value *val, Captures &, + Replacements 
&replacements) { + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. + auto offset = qubit_alloc_manager->allocate(); + + // Creating a new index APInt that is shifted by the offset of the allocation + // TODO(tfr): Get the bitwidth size from somewhere + auto idx = llvm::APInt(64, offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the instruction with new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + return true; + }; + addRule({Call("__quantum__rt__qubit_allocate"), allocation_replacer}); + + // Removing release calls + removeFunctionCall("__quantum__rt__qubit_release"); +} + +void RuleFactory::useStaticResultAllocation() +{ + auto result_alloc_manager = result_alloc_manager_; + auto replace_measurement = [result_alloc_manager](Builder &builder, Value *val, Captures &cap, + Replacements &replacements) { + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. 
+ auto offset = result_alloc_manager->allocate(); + + // Creating a new index APInt that is shifted by the offset of the allocation + // TODO(tfr): Get the bitwidth size from somewhere + auto idx = llvm::APInt(64, offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + auto module = llvm::dyn_cast(val)->getModule(); + auto function = module->getFunction("__quantum__qis__mz__body"); + + std::vector arguments; + arguments.push_back(cap["qubit"]); + arguments.push_back(instr); + + if (!function) + { + std::vector types; + for (auto &arg : arguments) + { + types.push_back(arg->getType()); + } + + auto return_type = llvm::Type::getVoidTy(val->getContext()); + + llvm::FunctionType *fnc_type = llvm::FunctionType::get(return_type, types, false); + function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, + "__quantum__qis__mz__body", module); + } + + // Ensuring we are inserting after the instruction being deleted + builder.SetInsertPoint(llvm::dyn_cast(val)->getNextNode()); + + builder.CreateCall(function, arguments); + + // Replacing the instruction with new instruction + // TODO: (tfr): insert instruction before and then replace, with new call + replacements.push_back({llvm::dyn_cast(val), instr}); + + return true; + }; + + addRule({Call("__quantum__qis__m__body", "qubit"_cap = _), std::move(replace_measurement)}); +} + +void RuleFactory::optimiseBranchQuatumOne() +{ + auto get_one = Call("__quantum__rt__result_get_one"); + auto replace_branch_positive = [](Builder &builder, Value *val, Captures &cap, + Replacements &replacements) { + auto result = cap["result"]; + auto cond = llvm::dyn_cast(cap["cond"]); + // Replacing 
result + auto module = llvm::dyn_cast(val)->getModule(); + auto function = module->getFunction("__quantum__qir__read_result"); + std::vector arguments; + arguments.push_back(result); + + if (!function) + { + std::vector types; + for (auto &arg : arguments) + { + types.push_back(arg->getType()); + } + + auto return_type = llvm::Type::getInt1Ty(val->getContext()); + + llvm::FunctionType *fnc_type = llvm::FunctionType::get(return_type, types, false); + function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, + "__quantum__qir__read_result", module); + } + auto result_inst = llvm::dyn_cast(result); + + builder.SetInsertPoint(result_inst->getNextNode()); + auto new_call = builder.CreateCall(function, arguments); + new_call->takeName(cond); + + for (auto &use : cond->uses()) + { + llvm::User *user = use.getUser(); + user->setOperand(use.getOperandNo(), new_call); + } + cond->replaceAllUsesWith(new_call); + + // Deleting the previous condition and function to fetch one + replacements.push_back({cond, nullptr}); + replacements.push_back({cap["one"], nullptr}); + + return false; + }; + + // Variations of get_one + addRule({Branch("cond"_cap = + Call("__quantum__rt__result_equal", "result"_cap = _, "one"_cap = get_one), + _, _), + replace_branch_positive}); + + addRule({Branch("cond"_cap = + Call("__quantum__rt__result_equal", "one"_cap = get_one, "result"_cap = _), + _, _), + replace_branch_positive}); +} + +void RuleFactory::disableReferenceCounting() +{ + removeFunctionCall("__quantum__rt__array_update_reference_count"); + removeFunctionCall("__quantum__rt__string_update_reference_count"); + removeFunctionCall("__quantum__rt__result_update_reference_count"); + + removeFunctionCall("__quantum__rt__string_create"); + removeFunctionCall("__quantum__rt__string_release"); + removeFunctionCall("__quantum__rt__message"); +} + +void RuleFactory::disableAliasCounting() +{ + removeFunctionCall("__quantum__rt__array_update_alias_count"); + 
removeFunctionCall("__quantum__rt__string_update_alias_count"); + removeFunctionCall("__quantum__rt__result_update_alias_count"); +} + +void RuleFactory::disableStringSupport() +{ + removeFunctionCall("__quantum__rt__string_create"); + removeFunctionCall("__quantum__rt__string_release"); + removeFunctionCall("__quantum__rt__message"); +} + +ReplacementRulePtr RuleFactory::addRule(ReplacementRule &&rule) +{ + auto ret = std::make_shared(std::move(rule)); + + rule_set_.addRule(ret); + + return ret; } } // namespace quantum diff --git a/src/Passes/Source/Rules/Factory.hpp b/src/Passes/Source/Rules/Factory.hpp index 54e4d4abed..3dc9ff7563 100644 --- a/src/Passes/Source/Rules/Factory.hpp +++ b/src/Passes/Source/Rules/Factory.hpp @@ -2,8 +2,10 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. +#include "AllocationManager/AllocationManager.hpp" #include "Llvm/Llvm.hpp" #include "Rules/ReplacementRule.hpp" +#include "Rules/RuleSet.hpp" #include @@ -12,14 +14,55 @@ namespace quantum { struct RuleFactory { - using String = std::string; - using ReplacementRulePtr = std::shared_ptr; +public: + using String = std::string; + using ReplacementRulePtr = std::shared_ptr; + using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; + using Replacements = ReplacementRule::Replacements; + using Captures = OperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using Builder = ReplacementRule::Builder; - /// Single rules + RuleFactory(RuleSet &rule_set); - static ReplacementRulePtr removeFunctionCall(String const &name); + /// Generic rules + void removeFunctionCall(String const &name); - /// + /// Conventions + /// @{ + void useStaticQuantumArrayAllocation(); + void useStaticQuantumAllocation(); + void useStaticResultAllocation(); + /// @} + + /// Optimisations + /// @{ + void optimiseBranchQuatumOne(); + void optimiseBranchQuatumZero(); + /// @} + + /// Disabling by feature + /// @{ + void 
disableReferenceCounting(); + void disableAliasCounting(); + void disableStringSupport(); + // TODO: void disableDynamicQuantumAllocation(); + /// @} + + AllocationManagerPtr qubitAllocationManager() const; + AllocationManagerPtr resultAllocationManager() const; + +private: + ReplacementRulePtr addRule(ReplacementRule &&rule); + + RuleSet &rule_set_; + + /// Allocation managers + /// @{ + AllocationManagerPtr qubit_alloc_manager_{nullptr}; + AllocationManagerPtr result_alloc_manager_{nullptr}; + /// @} }; } // namespace quantum diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index 264777e997..5840a9e476 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -64,6 +64,12 @@ inline OperandPrototypePtr Call(std::string const &name, Args... args) return ret; } +inline OperandPrototypePtr CallByNameOnly(std::string const &name) +{ + OperandPrototypePtr ret = std::make_shared(name); + return ret; +} + inline OperandPrototypePtr BitCast(OperandPrototypePtr arg) { auto cast_pattern = std::make_shared(); diff --git a/src/Passes/Source/Rules/RuleSet.cpp b/src/Passes/Source/Rules/RuleSet.cpp index e2683d59fa..715bc6c5d0 100644 --- a/src/Passes/Source/Rules/RuleSet.cpp +++ b/src/Passes/Source/Rules/RuleSet.cpp @@ -2,6 +2,7 @@ #include "AllocationManager/AllocationManager.hpp" #include "Llvm/Llvm.hpp" +#include "Rules/Factory.hpp" #include "Rules/ReplacementRule.hpp" #include @@ -9,362 +10,6 @@ namespace microsoft { namespace quantum { -RuleSet::RuleSet() -{ - - using namespace microsoft::quantum::patterns; - - // Shared pointer to be captured in the lambdas of the patterns - // Note that you cannot capture this as the reference is destroyed upon - // copy. 
Since PassInfoMixin requires copy, such a construct would break - auto qubit_alloc_manager = AllocationManager::createNew(); - auto result_alloc_manager = AllocationManager::createNew(); - - // Pattern 0 - Find type - ReplacementRule rule0; - - auto get_element = - Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); - rule0.setPattern("cast"_cap = BitCast("getElement"_cap = get_element)); - rule0.setReplacer([qubit_alloc_manager](Builder &, Value *, Captures &cap, Replacements &) { - auto type = cap["cast"]->getType(); - - // This rule only deals with access to arrays of opaque types - auto ptr_type = llvm::dyn_cast(type); - if (ptr_type == nullptr) - { - return false; - } - - auto array = cap["arrayName"]; - - llvm::errs() << *array->getType() << " of " << *type << " " << type->isPointerTy() << " " - << *type->getPointerElementType() << " " << type->isArrayTy() << "\n"; - return false; - }); - rules_.emplace_back(std::make_shared(std::move(rule0))); - - // Pattern 1 - Get array index - - // auto get_element = - // Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); - auto cast_pattern = BitCast("getElement"_cap = get_element); - auto load_pattern = Load("cast"_cap = cast_pattern); - - // Rule 1 - ReplacementRule rule1a; - rule1a.setPattern(std::move(load_pattern)); - - // Replacement details - rule1a.setReplacer([qubit_alloc_manager](Builder &builder, Value *val, Captures &cap, - Replacements &replacements) { - // Getting the type pointer - - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) - { - return false; - } - - // Get the index and testing that it is a constant int - auto cst = llvm::dyn_cast(cap["index"]); - if (cst == nullptr) - { - // ... if not, we cannot perform the mapping. - return false; - } - - // Computing the index by getting the current index value and offseting by - // the offset at which the qubit array is allocated. 
- auto llvm_size = cst->getValue(); - auto offset = qubit_alloc_manager->getOffset(cap["arrayName"]->getName().str()); - - // Creating a new index APInt that is shifted by the offset of the allocation - auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); - - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); - - // Replacing the instruction with new instruction - replacements.push_back({llvm::dyn_cast(val), instr}); - - // Deleting the getelement and cast operations - replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); - replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); - - return true; - }); - rules_.emplace_back(std::make_shared(std::move(rule1a))); - - ReplacementRule rule1b; - rule1b.setPattern(Call("__quantum__rt__qubit_allocate")); - - // Replacement details - rule1b.setReplacer( - [qubit_alloc_manager](Builder &builder, Value *val, Captures &, Replacements &replacements) { - // Getting the type pointer - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) - { - return false; - } - - // Computing the index by getting the current index value and offseting by - // the offset at which the qubit array is allocated. - auto offset = qubit_alloc_manager->allocate(); - - // Creating a new index APInt that is shifted by the offset of the allocation - // TODO(tfr): Get the bitwidth size from somewhere - auto idx = llvm::APInt(64, offset); - - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. 
Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); - - // Replacing the instruction with new instruction - replacements.push_back({llvm::dyn_cast(val), instr}); - - return true; - }); - rules_.emplace_back(std::make_shared(std::move(rule1b))); - - // Rule 6 - perform static allocation and delete __quantum__rt__qubit_allocate_array - ReplacementRule rule6; - auto allocate_call = Call("__quantum__rt__qubit_allocate_array", "size"_cap = _); - rule6.setPattern(std::move(allocate_call)); - - rule6.setReplacer( - [qubit_alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { - auto cst = llvm::dyn_cast(cap["size"]); - if (cst == nullptr) - { - return false; - } - - auto llvm_size = cst->getValue(); - auto name = val->getName().str(); - qubit_alloc_manager->allocate(name, llvm_size.getZExtValue()); - - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - - rules_.emplace_back(std::make_shared(std::move(rule6))); - - // Rule 8 - standard array allocation - /* - ReplacementRule rule8; - auto allocate_array_call = - Call("__quantum__rt__array_create_1d", "elementSize"_cap = _, "size"_cap = _); - rule8.setPattern(std::move(allocate_array_call)); - - rule8.setReplacer( - [qubit_alloc_manager](Builder &, Value *val, Captures &cap, Replacements &replacements) { - auto cst = llvm::dyn_cast(cap["size"]); - if (cst == nullptr) - { - return false; - } - - auto llvm_size = cst->getValue(); - qubit_alloc_manager->allocate(val->getName().str(), llvm_size.getZExtValue(), true); - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }); - - rules_.emplace_back(std::move(rule8)); -*/ - // Rule 10 - track stored values - auto get_target_element = Call("__quantum__rt__array_get_element_ptr_1d", - "targetArrayName"_cap = _, "targetIndex"_cap = _); - auto get_value_element = 
Call("__quantum__rt__array_get_element_ptr_1d", "valueArrayName"_cap = _, - "targetValue"_cap = _); - auto target = BitCast("target"_cap = get_target_element); - auto value = BitCast("value"_cap = get_element); - - auto store_pattern = Store(target, value); - - ReplacementRule rule10; - rule10.setPattern(std::move(store_pattern)); - - rule10.setReplacer([qubit_alloc_manager](Builder &, Value *, Captures &, Replacements &) { - llvm::errs() << "Found store pattern" - << "\n"; - return false; - }); - rules_.emplace_back(std::make_shared(std::move(rule10))); - - // Measurements - auto replace_measurement = [result_alloc_manager](Builder &builder, Value *val, Captures &cap, - Replacements &replacements) { - // Getting the type pointer - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) - { - return false; - } - - // Computing the index by getting the current index value and offseting by - // the offset at which the qubit array is allocated. - auto offset = result_alloc_manager->allocate(); - - // Creating a new index APInt that is shifted by the offset of the allocation - // TODO(tfr): Get the bitwidth size from somewhere - auto idx = llvm::APInt(64, offset); - - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. 
Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); - - auto module = llvm::dyn_cast(val)->getModule(); - auto function = module->getFunction("__quantum__qis__mz__body"); - - std::vector arguments; - arguments.push_back(cap["qubit"]); - arguments.push_back(instr); - - if (!function) - { - std::vector types; - for (auto &arg : arguments) - { - types.push_back(arg->getType()); - } - - auto return_type = llvm::Type::getVoidTy(val->getContext()); - - llvm::FunctionType *fnc_type = llvm::FunctionType::get(return_type, types, false); - function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, - "__quantum__qis__mz__body", module); - } - - // Ensuring we are inserting after the instruction being deleted - builder.SetInsertPoint(llvm::dyn_cast(val)->getNextNode()); - - builder.CreateCall(function, arguments); - - // Replacing the instruction with new instruction - // TODO: (tfr): insert instruction before and then replace, with new call - replacements.push_back({llvm::dyn_cast(val), instr}); - - return true; - }; - - rules_.emplace_back(std::make_shared( - Call("__quantum__qis__m__body", "qubit"_cap = _), replace_measurement)); - - // Quantum comparisons - auto get_one = Call("__quantum__rt__result_get_one"); - auto replace_branch_positive = [](Builder &builder, Value *val, Captures &cap, - Replacements &replacements) { - auto result = cap["result"]; - auto cond = llvm::dyn_cast(cap["cond"]); - // Replacing result - auto module = llvm::dyn_cast(val)->getModule(); - auto function = module->getFunction("__quantum__qir__read_result"); - std::vector arguments; - arguments.push_back(result); - - if (!function) - { - std::vector types; - for (auto &arg : arguments) - { - types.push_back(arg->getType()); - } - - auto return_type = llvm::Type::getInt1Ty(val->getContext()); - - llvm::FunctionType *fnc_type = 
llvm::FunctionType::get(return_type, types, false); - function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, - "__quantum__qir__read_result", module); - } - auto result_inst = llvm::dyn_cast(result); - - builder.SetInsertPoint(result_inst->getNextNode()); - auto new_call = builder.CreateCall(function, arguments); - new_call->takeName(cond); - - for (auto &use : cond->uses()) - { - llvm::User *user = use.getUser(); - user->setOperand(use.getOperandNo(), new_call); - } - cond->replaceAllUsesWith(new_call); - - // Deleting the previous condition and function to fetch one - replacements.push_back({cond, nullptr}); - replacements.push_back({cap["one"], nullptr}); - - return false; - }; - - // Variations of get_one - rules_.emplace_back(std::make_shared( - Branch( - "cond"_cap = Call("__quantum__rt__result_equal", "result"_cap = _, "one"_cap = get_one), - _, _), - replace_branch_positive)); - - rules_.emplace_back(std::make_shared( - Branch( - "cond"_cap = Call("__quantum__rt__result_equal", "one"_cap = get_one, "result"_cap = _), - _, _), - replace_branch_positive)); - - auto deleter = deleteInstruction(); - rules_.emplace_back(std::make_shared( - Call("__quantum__rt__qubit_release_array", "name"_cap = _), - [qubit_alloc_manager, deleter](Builder &builder, Value *val, Captures &cap, - Replacements &rep) { - qubit_alloc_manager->release(cap["name"]->getName().str()); - return deleter(builder, val, cap, rep); - } - - )); - - rules_.emplace_back(std::make_shared(Call("__quantum__rt__qubit_release", _), - deleteInstruction())); - - // Functions that we do not care about - rules_.emplace_back(std::make_shared( - Call("__quantum__rt__array_update_alias_count", _, _), deleteInstruction())); - rules_.emplace_back(std::make_shared( - Call("__quantum__rt__string_update_alias_count", _, _), deleteInstruction())); - rules_.emplace_back(std::make_shared( - Call("__quantum__rt__result_update_alias_count", _, _), deleteInstruction())); - 
rules_.emplace_back(std::make_shared( - Call("__quantum__rt__array_update_reference_count", _, _), deleteInstruction())); - rules_.emplace_back(std::make_shared( - Call("__quantum__rt__string_update_reference_count", _, _), deleteInstruction())); - rules_.emplace_back(std::make_shared( - Call("__quantum__rt__result_update_reference_count", _, _), deleteInstruction())); - - rules_.emplace_back(std::make_shared(Call("__quantum__rt__string_create", _), - deleteInstruction())); - rules_.emplace_back(std::make_shared(Call("__quantum__rt__string_release", _), - deleteInstruction())); - - rules_.emplace_back( - std::make_shared(Call("__quantum__rt__message", _), deleteInstruction())); -} - bool RuleSet::matchAndReplace(Instruction *value, Replacements &replacements) { Captures captures; @@ -385,5 +30,10 @@ bool RuleSet::matchAndReplace(Instruction *value, Replacements &replacements) return false; } +void RuleSet::addRule(ReplacementRulePtr const &rule) +{ + rules_.push_back(rule); +} + } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/Rules/RuleSet.hpp b/src/Passes/Source/Rules/RuleSet.hpp index 7f92c17be9..a7d0a8c11d 100644 --- a/src/Passes/Source/Rules/RuleSet.hpp +++ b/src/Passes/Source/Rules/RuleSet.hpp @@ -24,7 +24,7 @@ class RuleSet using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; /// @{ - RuleSet(); + RuleSet() = default; RuleSet(RuleSet const &) = default; RuleSet(RuleSet &&) = default; ~RuleSet() = default; @@ -39,6 +39,8 @@ class RuleSet bool matchAndReplace(Instruction *value, Replacements &replacements); + void addRule(ReplacementRulePtr const &rule); + private: Rules rules_; ///< Rules that describes QIR mappings }; diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index 7815e611e2..4924796194 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -8,9 +8,9 @@ run: 
build-qaa analysis-example.ll run-replace: build-ir build-qaa build-esa analysis-example.ll # opt -loop-unroll -unroll-count=3 -unroll-allow-partial -# opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ -# -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll > test1.ll - opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-unroll,transformation-rule" -S analysis-example.ll > test2.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ + -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll > test1.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-unroll,restrict-qir" -S test1.ll > test2.ll opt --passes="inline" -S test2.ll | opt -O1 -S diff --git a/src/Passes/examples/QubitAllocationAnalysis/test2.ll b/src/Passes/examples/QubitAllocationAnalysis/test2.ll index 906107066b..d3a835c836 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/test2.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/test2.ll @@ -1,4 +1,4 @@ -; ModuleID = 'analysis-example.ll' +; ModuleID = 'test1.ll' source_filename = "qir/ConstSizeArray.ll" %Qubit = type opaque @@ -10,7 +10,7 @@ source_filename = "qir/ConstSizeArray.ll" define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: - %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) + %0 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.1(%Qubit* %src) %1 = call i1 @__quantum__qir__read_result(%Result* %0) br i1 %1, label %then0__1, label %continue__1 @@ -19,7 +19,7 @@ then0__1: ; preds = %entry br label %continue__1 continue__1: ; 
preds = %then0__1, %entry - %2 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) + %2 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.2(%Qubit* %intermediary) %3 = call i1 @__quantum__qir__read_result(%Result* %2) br i1 %3, label %then0__2, label %continue__2 @@ -75,9 +75,9 @@ entry: %7 = inttoptr i64 1 to %Qubit* %8 = inttoptr i64 3 to %Qubit* call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) - %9 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) + %9 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3(%Qubit* %leftMessage) %10 = inttoptr i64 3 to %Qubit* - %11 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %10) + %11 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4(%Qubit* %10) ret void } @@ -154,13 +154,13 @@ declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.5() ret void } define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.6() ret void } @@ -168,6 +168,118 @@ declare void @__quantum__rt__message(%String*) local_unnamed_addr declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.1(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 1 to %Result* + call void 
@__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.2(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 2 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 3 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 4 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.5() unnamed_addr { +entry: + %leftMessage = inttoptr i64 4 to %Qubit* + %rightMessage = inttoptr i64 5 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = inttoptr i64 6 to %Qubit* + %1 = inttoptr i64 8 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) + %2 = inttoptr i64 7 to %Qubit* + %3 = inttoptr i64 9 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) + %4 = inttoptr i64 6 to %Qubit* + %5 = inttoptr i64 8 to %Qubit* + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) + %6 = inttoptr i64 8 to %Qubit* + %7 = inttoptr i64 7 to %Qubit* + 
%8 = inttoptr i64 9 to %Qubit* + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) + %9 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.7(%Qubit* %leftMessage) + %10 = inttoptr i64 9 to %Qubit* + %11 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.8(%Qubit* %10) + ret void +} + +define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.6() unnamed_addr { +entry: + %leftMessage = inttoptr i64 10 to %Qubit* + %rightMessage = inttoptr i64 11 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = inttoptr i64 12 to %Qubit* + %1 = inttoptr i64 14 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) + %2 = inttoptr i64 13 to %Qubit* + %3 = inttoptr i64 15 to %Qubit* + call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) + %4 = inttoptr i64 12 to %Qubit* + %5 = inttoptr i64 14 to %Qubit* + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) + %6 = inttoptr i64 14 to %Qubit* + %7 = inttoptr i64 13 to %Qubit* + %8 = inttoptr i64 15 to %Qubit* + call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) + %9 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.9(%Qubit* %leftMessage) + %10 = inttoptr i64 15 to %Qubit* + %11 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.10(%Qubit* %10) + ret void +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.7(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 5 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define 
internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.8(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 6 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.9(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 7 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.10(%Qubit* %target) unnamed_addr { +entry: + %result = inttoptr i64 8 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + declare i1 @__quantum__qir__read_result(%Result*) declare void @__quantum__qis__mz__body(%Qubit*, %Result*) From df87cd1d093f45a60bd83974c5163548569b85a6 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 11 Aug 2021 15:36:40 +0200 Subject: [PATCH 074/106] Removing orig files --- src/Passes/CMakeLists.txt.orig | 60 -- src/Passes/Source/Apps/Qat/Qat.cpp | 2 +- src/Passes/Source/Llvm/Llvm.hpp.orig | 65 -- .../ClassicalIrCommandline/README.md.orig | 45 -- .../ConstSizeArray/ConstSizeArray.csproj.orig | 20 - .../ConstSizeArray/ConstSizeArray.qs.orig | 85 --- .../ConstSizeArray/Makefile.orig | 17 - .../QubitAllocationAnalysis/Makefile.orig | 47 -- .../analysis-example.ll.orig | 640 ------------------ 9 files changed, 1 insertion(+), 980 deletions(-) delete mode 100644 src/Passes/CMakeLists.txt.orig delete mode 100644 src/Passes/Source/Llvm/Llvm.hpp.orig delete mode 100644 src/Passes/examples/ClassicalIrCommandline/README.md.orig delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj.orig delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs.orig delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile.orig delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/Makefile.orig delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll.orig diff --git a/src/Passes/CMakeLists.txt.orig b/src/Passes/CMakeLists.txt.orig deleted file mode 100644 index 5459a689ea..0000000000 --- a/src/Passes/CMakeLists.txt.orig +++ /dev/null @@ -1,60 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -<<<<<<< HEAD -project(QirPasses) -======= -project(Passes) - -if (WIN32) - message(STATUS "Adding C:\\Program Files\\LLVM\\lib to LLVM paths") - find_package(LLVM REQUIRED PATHS "C:\\Program Files\\LLVM\\lib" CONFIG ) -else() - find_package(LLVM REQUIRED CONFIG) -endif() - ->>>>>>> features/llvm-passes - -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, 
we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. -set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/Source) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) - - -# Adding the libraries -<<<<<<< HEAD -add_subdirectory(Source) -add_subdirectory(tests) - -======= -add_subdirectory(libs) -add_subdirectory(tests) ->>>>>>> features/llvm-passes diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index aea054daba..3c851b24ec 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -15,7 +15,7 @@ int main(int /*argc*/, char **argv) FPM.addPass(LoopSimplifyPass()); MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - // MPM.run(*module); + MPM.run(*module); // m->print(llvm) llvm::errs() << *module << "\n"; } diff --git 
a/src/Passes/Source/Llvm/Llvm.hpp.orig b/src/Passes/Source/Llvm/Llvm.hpp.orig deleted file mode 100644 index 9419ae2ec3..0000000000 --- a/src/Passes/Source/Llvm/Llvm.hpp.orig +++ /dev/null @@ -1,65 +0,0 @@ -#pragma once -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#pragma GCC diagnostic ignored "-Wpedantic" -#pragma GCC diagnostic ignored "-Wunused-value" -#pragma GCC diagnostic ignored "-Wsign-compare" -#pragma GCC diagnostic ignored "-Wunknown-warning-option" -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Wall" -#pragma GCC diagnostic ignored "-Weverything" -#endif - -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wconversion" -#pragma clang diagnostic ignored "-Wpedantic" -#pragma clang diagnostic ignored "-Werror" -#pragma clang diagnostic ignored "-Wshadow" -#pragma clang diagnostic ignored "-Wreturn-std-move" -#pragma clang diagnostic ignored "-Wunknown-warning-option" -#pragma clang diagnostic ignored "-Wunused-parameter" -#pragma clang diagnostic ignored "-Wall" -#pragma clang diagnostic ignored "-Weverything" -#endif - -// Passes -#include "llvm/Passes/PassBuilder.h" -#include "llvm/Passes/PassPlugin.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Utils/Cloning.h" - -// Building -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/Constants.h" -#include "llvm/IR/DerivedTypes.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/IRBuilder.h" -#include "llvm/IR/LLVMContext.h" -<<<<<<< HEAD:src/Passes/Source/Llvm/Llvm.hpp -#include "llvm/IR/Module.h" -#include "llvm/IR/Type.h" -#include "llvm/IR/Verifier.h" - -// Reader tool -#include "llvm/IRReader/IRReader.h" -#include "llvm/Support/SourceMgr.h" -======= -#include "llvm/IR/LegacyPassManager.h" -#include "llvm/IR/Module.h" 
-#include "llvm/IR/Type.h" -#include "llvm/IR/Verifier.h" ->>>>>>> features/llvm-passes:src/Passes/include/Llvm.hpp - -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif diff --git a/src/Passes/examples/ClassicalIrCommandline/README.md.orig b/src/Passes/examples/ClassicalIrCommandline/README.md.orig deleted file mode 100644 index 3bf5a57d92..0000000000 --- a/src/Passes/examples/ClassicalIrCommandline/README.md.orig +++ /dev/null @@ -1,45 +0,0 @@ -# Emitting classical IRs - -This example demonstrates how to emit a classical IR and run a custom -pass on it. The purpose of this example is to teach the user how to apply -a pass to a IR using commandline tools only. - -IRs can be represented either by a human readible language or through bytecode. For -C programs former is generated by - -```sh - clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll -``` - -whereas the latter is generated by executing: - -```sh - clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc -``` - -This generates a nice and short IR which makes not too overwhelming to understand what is going on. - -<<<<<<< HEAD -======= -## Legacy passes - -This part assumes that you have built the Passes library. - -```sh -opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-counter -analyze classical-program.ll -``` - ->>>>>>> features/llvm-passes -## Next-gen passes - -This part assumes that you have built the Passes library. 
- -```sh -<<<<<<< HEAD -opt -load-pass-plugin ../../{Debug,Release}/libs/libOpsCounter.{dylib,so} --passes="print" -disable-output classical-program.bc -======= -opt -load-pass-plugin ../../{Debug,Release}/libs/libQSharpPasses.{dylib,so} --passes="print" -disable-output classical-program.bc ->>>>>>> features/llvm-passes -``` - -opt -O3 -S classical-program.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj.orig b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj.orig deleted file mode 100644 index 3e7d7a75ea..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj.orig +++ /dev/null @@ -1,20 +0,0 @@ -<<<<<<< HEAD - -======= - ->>>>>>> features/llvm-passes - - - Exe - netcoreapp3.1 - true - - -<<<<<<< HEAD - - - - -======= ->>>>>>> features/llvm-passes - diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs.orig b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs.orig deleted file mode 100644 index 9c294523fc..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs.orig +++ /dev/null @@ -1,85 +0,0 @@ -<<<<<<< HEAD -namespace TeleportChain { - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Arrays; - open Microsoft.Quantum.Measurement; - open Microsoft.Quantum.Preparation; - - operation PrepareEntangledPair(left : Qubit, right : Qubit) : Unit is Adj + Ctl { - H(left); - CNOT(left, right); - } - - operation ApplyCorrection(src : Qubit, intermediary : Qubit, dest : Qubit) : Unit { - if (MResetZ(src) == One) { Z(dest); } - if (MResetZ(intermediary) == One) { X(dest); } - } - - operation TeleportQubitUsingPresharedEntanglement(src : Qubit, intermediary : Qubit, dest : Qubit) : Unit { - Adjoint PrepareEntangledPair(src, intermediary); - ApplyCorrection(src, intermediary, dest); - } - - operation TeleportQubit(src 
: Qubit, dest : Qubit) : Unit { - use intermediary = Qubit(); - PrepareEntangledPair(intermediary, dest); - TeleportQubitUsingPresharedEntanglement(src, intermediary, dest); - } - - operation DemonstrateEntanglementSwapping() : (Result, Result) { - use (reference, src, intermediary, dest) = (Qubit(), Qubit(), Qubit(), Qubit()); - PrepareEntangledPair(reference, src); - TeleportQubit(src, dest); - return (MResetZ(reference), MResetZ(dest)); - } - - @EntryPoint() - operation DemonstrateTeleportationUsingPresharedEntanglement() : Unit { - let nPairs = 2; - use (leftMessage, rightMessage, leftPreshared, rightPreshared) = (Qubit(), Qubit(), Qubit[nPairs], Qubit[nPairs]); - PrepareEntangledPair(leftMessage, rightMessage); - for i in 0..nPairs-1 { - PrepareEntangledPair(leftPreshared[i], rightPreshared[i]); - } - - TeleportQubitUsingPresharedEntanglement(rightMessage, leftPreshared[0], rightPreshared[0]); - for i in 1..nPairs-1 { - TeleportQubitUsingPresharedEntanglement(rightPreshared[i-1], leftPreshared[i], rightPreshared[i]); - } - - let _ = MResetZ(leftMessage); - let _ = MResetZ(rightPreshared[nPairs-1]); - // return (); -======= -namespace Example { - @EntryPoint() - operation Main() : Int - { - - QuantumProgram(3,2,1); - QuantumProgram(4,X(2),4); - return 0; - } - - function X(value: Int): Int - { - return 3 * value; - } - - operation QuantumProgram(x: Int, h: Int, g: Int) : Unit { - let z = x * (x + 1) - 47; - let y = 3 * x; - - use qubits0 = Qubit[9]; - use qubits1 = Qubit[(y - 2)/2-z]; - use qubits2 = Qubit[y - g]; - use qubits3 = Qubit[h]; - use qubits4 = Qubit[X(x)]; - - for idxIteration in 0..g { - //Message(idxIteration); - } ->>>>>>> features/llvm-passes - } -} \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile.orig b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile.orig deleted file mode 100644 index af4789c52e..0000000000 --- 
a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile.orig +++ /dev/null @@ -1,17 +0,0 @@ -analysis-example.ll: - dotnet build ConstSizeArray.csproj - opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll - make clean - -comparison: - clang++ -S -emit-llvm -std=c++14 -stdlib=libc++ Comparison.cpp -o comparison.ll - -clean: - rm -rf bin - rm -rf obj -<<<<<<< HEAD -# rm -rf qir -======= - rm -rf qir ->>>>>>> features/llvm-passes - \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile.orig b/src/Passes/examples/QubitAllocationAnalysis/Makefile.orig deleted file mode 100644 index 35f81352be..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile.orig +++ /dev/null @@ -1,47 +0,0 @@ -run-expand: build-qaa build-esa analysis-example.ll -<<<<<<< HEAD - opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ - -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll - - -run: build-qaa analysis-example.ll - opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll - -run-replace: build-ir build-qaa build-esa analysis-example.ll -# opt -loop-unroll -unroll-count=3 -unroll-allow-partial - opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ - -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll > test1.ll - opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-unroll,restrict-qir" -S test1.ll > test2.ll - opt --passes="inline" -S test2.ll | opt -O1 -S -======= - opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib \ - -load-pass-plugin ../../Debug/libs/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S 
analysis-example.ll - - -run: build-qaa analysis-example.ll - opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll ->>>>>>> features/llvm-passes - - -build-prepare: - pushd ../../ && mkdir -p Debug && cd Debug && cmake ..&& popd || popd - -build-qaa: build-prepare - pushd ../../Debug && make QubitAllocationAnalysis && popd || popd - -build-esa: build-prepare - pushd ../../Debug && make ExpandStaticAllocation && popd || popd - -<<<<<<< HEAD -build-ir: build-prepare - pushd ../../Debug && make TransformationRule && popd || popd - -======= ->>>>>>> features/llvm-passes - -analysis-example.ll: - cd ConstSizeArray && make analysis-example.ll - -clean: - cd ConstSizeArray && make clean - rm analysis-example.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll.orig b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll.orig deleted file mode 100644 index c648c83592..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll.orig +++ /dev/null @@ -1,640 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -<<<<<<< HEAD -%Qubit = type opaque -%Result = type opaque -%Array = type opaque -%String = type opaque - -@0 = internal constant [3 x i8] c"()\00" - -define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) - %1 = call %Result* @__quantum__rt__result_get_one() - %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) - call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) - br i1 %2, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - %3 = call 
fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) - %4 = call %Result* @__quantum__rt__result_get_one() - %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) - call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) - br i1 %5, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) - br label %continue__2 - -continue__2: ; preds = %then0__2, %continue__1 - ret void -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr - -declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr - -define internal fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__z(%Qubit* %qubit) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__x(%Qubit* %qubit) - ret void -} - -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { -entry: - %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) - %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) - call fastcc void 
@TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %2 = load %Qubit*, %Qubit** %1, align 8 - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %4 = bitcast i8* %3 to %Qubit** - %5 = load %Qubit*, %Qubit** %4, align 8 - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %5) - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) - %7 = bitcast i8* %6 to %Qubit** - %8 = load %Qubit*, %Qubit** %7, align 8 - %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %10 = bitcast i8* %9 to %Qubit** - %11 = load %Qubit*, %Qubit** %10, align 8 - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %8, %Qubit* %11) - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %13 = bitcast i8* %12 to %Qubit** - %14 = load %Qubit*, %Qubit** %13, align 8 - %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %16 = bitcast i8* %15 to %Qubit** - %17 = load %Qubit*, %Qubit** %16, align 8 - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %14, %Qubit* %17) - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %19 = bitcast i8* %18 to %Qubit** - %20 = load %Qubit*, %Qubit** %19, align 8 - %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) - %22 = bitcast i8* %21 to %Qubit** - %23 = load %Qubit*, %Qubit** %22, align 8 - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %25 = bitcast i8* %24 to %Qubit** - %26 = load %Qubit*, %Qubit** %25, align 8 - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, 
%Qubit* %23, %Qubit* %26) - %27 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) - %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %29 = bitcast i8* %28 to %Qubit** - %30 = load %Qubit*, %Qubit** %29, align 8 - %31 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %30) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) - call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) - call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) - call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) - call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret void -======= -%Tuple = type opaque -%Qubit = type opaque -%Array = type opaque -%Result = type opaque -%Callable = type opaque -%String = type opaque - -@Microsoft__Quantum__Qir__Emission__M = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Qir__Emission__M__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] -@0 = internal constant [3 x i8] c", \00" -@1 = internal constant [2 x i8] c"[\00" -@2 = internal constant [2 x i8] c"]\00" - -declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) local_unnamed_addr - -declare void @__quantum__qis__cnot__adj(%Qubit*, %Qubit*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare %Tuple* @__quantum__rt__tuple_create(i64) local_unnamed_addr - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) local_unnamed_addr - 
-define internal fastcc %Result* @Microsoft__Quantum__Qir__Emission__M__body(%Qubit* %q) unnamed_addr { -entry: - %0 = call %Result* @__quantum__qis__m__body(%Qubit* %q) - ret %Result* %0 -} - -declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr - -define internal fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %a, %Qubit* %b, %Qubit* %c) unnamed_addr { -entry: - call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %b) - call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %a) - call void @__quantum__qis__toffoli__body(%Qubit* %a, %Qubit* %b, %Qubit* %c) - ret void -} - -declare void @__quantum__qis__toffoli__body(%Qubit*, %Qubit*, %Qubit*) local_unnamed_addr - -define internal fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %a, %Qubit* %b, %Qubit* %c) unnamed_addr { -entry: - call void @__quantum__qis__toffoli__adj(%Qubit* %a, %Qubit* %b, %Qubit* %c) - call void @__quantum__qis__cnot__adj(%Qubit* %c, %Qubit* %a) - call void @__quantum__qis__cnot__adj(%Qubit* %c, %Qubit* %b) - ret void -} - -declare void @__quantum__qis__toffoli__adj(%Qubit*, %Qubit*, %Qubit*) local_unnamed_addr - -define internal fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() unnamed_addr { -entry: - %a = call %Array* @__quantum__rt__qubit_allocate_array(i64 4) - call void @__quantum__rt__array_update_alias_count(%Array* %a, i32 1) - %b = call %Array* @__quantum__rt__qubit_allocate_array(i64 4) - call void @__quantum__rt__array_update_alias_count(%Array* %b, i32 1) - %cin = call %Qubit* @__quantum__rt__qubit_allocate() - %cout = call %Qubit* @__quantum__rt__qubit_allocate() - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %q = load %Qubit*, %Qubit** %1, align 8 - call void @__quantum__qis__x__body(%Qubit* %q) - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %b) - %3 = add i64 %2, -1 - %.not1 = icmp slt i64 %3, 0 - br i1 %.not1, label 
%exit__1, label %body__1 - -body__1: ; preds = %entry, %body__1 - %4 = phi i64 [ %7, %body__1 ], [ 0, %entry ] - %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 %4) - %6 = bitcast i8* %5 to %Qubit** - %q__1 = load %Qubit*, %Qubit** %6, align 8 - call void @__quantum__qis__x__body(%Qubit* %q__1) - %7 = add i64 %4, 1 - %.not = icmp sgt i64 %7, %3 - br i1 %.not, label %exit__1, label %body__1 - -exit__1: ; preds = %body__1, %entry - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9, align 8 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %cin, %Qubit* %10, %Qubit* %13) - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %15 = bitcast i8* %14 to %Qubit** - %16 = load %Qubit*, %Qubit** %15, align 8 - %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 1) - %18 = bitcast i8* %17 to %Qubit** - %19 = load %Qubit*, %Qubit** %18, align 8 - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) - %21 = bitcast i8* %20 to %Qubit** - %22 = load %Qubit*, %Qubit** %21, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %16, %Qubit* %19, %Qubit* %22) - %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) - %24 = bitcast i8* %23 to %Qubit** - %25 = load %Qubit*, %Qubit** %24, align 8 - %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 2) - %27 = bitcast i8* %26 to %Qubit** - %28 = load %Qubit*, %Qubit** %27, align 8 - %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) - %30 = bitcast i8* %29 to %Qubit** - %31 = load %Qubit*, %Qubit** %30, align 8 - call fastcc void 
@Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %25, %Qubit* %28, %Qubit* %31) - %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) - %33 = bitcast i8* %32 to %Qubit** - %34 = load %Qubit*, %Qubit** %33, align 8 - %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 3) - %36 = bitcast i8* %35 to %Qubit** - %37 = load %Qubit*, %Qubit** %36, align 8 - %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) - %39 = bitcast i8* %38 to %Qubit** - %40 = load %Qubit*, %Qubit** %39, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %34, %Qubit* %37, %Qubit* %40) - %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) - %42 = bitcast i8* %41 to %Qubit** - %c = load %Qubit*, %Qubit** %42, align 8 - call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %cout) - %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) - %44 = bitcast i8* %43 to %Qubit** - %45 = load %Qubit*, %Qubit** %44, align 8 - %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 3) - %47 = bitcast i8* %46 to %Qubit** - %48 = load %Qubit*, %Qubit** %47, align 8 - %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) - %50 = bitcast i8* %49 to %Qubit** - %51 = load %Qubit*, %Qubit** %50, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %45, %Qubit* %48, %Qubit* %51) - %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) - %53 = bitcast i8* %52 to %Qubit** - %54 = load %Qubit*, %Qubit** %53, align 8 - %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 2) - %56 = bitcast i8* %55 to %Qubit** - %57 = load %Qubit*, %Qubit** %56, align 8 - %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) - %59 = bitcast i8* %58 to %Qubit** - %60 = load %Qubit*, %Qubit** %59, align 8 - call fastcc void 
@Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %54, %Qubit* %57, %Qubit* %60) - %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %62 = bitcast i8* %61 to %Qubit** - %63 = load %Qubit*, %Qubit** %62, align 8 - %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 1) - %65 = bitcast i8* %64 to %Qubit** - %66 = load %Qubit*, %Qubit** %65, align 8 - %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) - %68 = bitcast i8* %67 to %Qubit** - %69 = load %Qubit*, %Qubit** %68, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %63, %Qubit* %66, %Qubit* %69) - %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 0) - %71 = bitcast i8* %70 to %Qubit** - %72 = load %Qubit*, %Qubit** %71, align 8 - %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) - %74 = bitcast i8* %73 to %Qubit** - %75 = load %Qubit*, %Qubit** %74, align 8 - call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %cin, %Qubit* %72, %Qubit* %75) - %76 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* nonnull @Microsoft__Quantum__Qir__Emission__M, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - %77 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission___73da7dcac81a47ddabb1a0e30be3dfdb_ForEach__body(%Callable* %76, %Array* %b) - call void @__quantum__rt__array_update_alias_count(%Array* %b, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %a, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %76, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %76, i32 -1) - call void @__quantum__rt__qubit_release(%Qubit* %cin) - call void @__quantum__rt__qubit_release(%Qubit* %cout) - call void @__quantum__rt__qubit_release_array(%Array* %b) - call void @__quantum__rt__qubit_release_array(%Array* %a) - ret %Array* %77 ->>>>>>> 
features/llvm-passes -} - -declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -<<<<<<< HEAD -declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -define internal fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) - ret void -} - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr - -define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - call fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) - call fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__h(%Qubit* %qubit) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) unnamed_addr { -entry: - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) - call fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* 
%control, %Qubit* %target) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - ret void -} - -declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr - -declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr - -declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr - -declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - ret void -} - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { -entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) -======= -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr - -declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr - -declare i64 @__quantum__rt__array_get_size_1d(%Array*) local_unnamed_addr - -define internal fastcc %Array* 
@Microsoft__Quantum__Qir__Emission___73da7dcac81a47ddabb1a0e30be3dfdb_ForEach__body(%Callable* %action, %Array* %array) unnamed_addr { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) - %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) - call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 1) - %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) - %2 = add i64 %1, -1 - %.not9 = icmp slt i64 %2, 0 - br i1 %.not9, label %exit__1, label %body__1 - -body__1: ; preds = %entry, %exit__4 - %3 = phi i64 [ %32, %exit__4 ], [ 0, %entry ] - %res.010 = phi %Array* [ %14, %exit__4 ], [ %0, %entry ] - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3) - %5 = bitcast i8* %4 to %Qubit** - %item = load %Qubit*, %Qubit** %5, align 8 - %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) - %8 = bitcast i8* %7 to %Result** - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 8) - %10 = bitcast %Tuple* %9 to %Qubit** - store %Qubit* %item, %Qubit** %10, align 8 - %11 = call %Tuple* @__quantum__rt__tuple_create(i64 8) - call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %9, %Tuple* %11) - %12 = bitcast %Tuple* %11 to %Result** - %13 = load %Result*, %Result** %12, align 8 - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) - store %Result* %13, %Result** %8, align 8 - %14 = call %Array* @__quantum__rt__array_concatenate(%Array* %res.010, %Array* %6) - %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) - %16 = add i64 %15, -1 - 
%.not57 = icmp slt i64 %16, 0 - br i1 %.not57, label %exit__2, label %body__2 - -exit__1: ; preds = %exit__4, %entry - %res.0.lcssa = phi %Array* [ %0, %entry ], [ %14, %exit__4 ] - call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %res.0.lcssa, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) - ret %Array* %res.0.lcssa - -body__2: ; preds = %body__1, %body__2 - %17 = phi i64 [ %21, %body__2 ], [ 0, %body__1 ] - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %17) - %19 = bitcast i8* %18 to %Result** - %20 = load %Result*, %Result** %19, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %20, i32 1) - %21 = add i64 %17, 1 - %.not5 = icmp sgt i64 %21, %16 - br i1 %.not5, label %exit__2, label %body__2 - -exit__2: ; preds = %body__2, %body__1 - call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %res.010, i32 -1) - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) - %23 = bitcast i8* %22 to %Result** - %24 = load %Result*, %Result** %23, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %24, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) - %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %res.010) - %26 = add i64 %25, -1 - %.not68 = icmp slt i64 %26, 0 - br i1 %.not68, label %exit__4, label %body__4 - -body__4: ; preds = %exit__2, %body__4 - %27 = phi i64 [ %31, %body__4 ], [ 0, %exit__2 ] - %28 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %res.010, i64 %27) - %29 = bitcast i8* %28 to %Result** - %30 = load %Result*, %Result** %29, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %30, i32 -1) - %31 = add i64 %27, 1 - %.not6 = icmp sgt i64 %31, %26 - br i1 %.not6, label %exit__4, label %body__4 - -exit__4: ; preds = %body__4, %exit__2 - call void @__quantum__rt__array_update_reference_count(%Array* %res.010, i32 -1) - %32 = add i64 %3, 1 - %.not = icmp sgt i64 %32, %2 - br i1 %.not, label %exit__1, label %body__1 -} - -define internal void @Microsoft__Quantum__Qir__Emission__M__body__wrapper(%Tuple* nocapture readnone %capture-tuple, %Tuple* nocapture readonly %arg-tuple, %Tuple* nocapture %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to %Qubit** - %1 = load %Qubit*, %Qubit** %0, align 8 - %2 = call fastcc %Result* @Microsoft__Quantum__Qir__Emission__M__body(%Qubit* %1) - %3 = bitcast %Tuple* %result-tuple to %Result** - store %Result* %2, %Result** %3, align 8 - ret void -} - -declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) local_unnamed_addr - -declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) local_unnamed_addr - -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) local_unnamed_addr - -declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) local_unnamed_addr - -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) local_unnamed_addr - -declare %Array* @__quantum__rt__array_create_1d(i32, i64) local_unnamed_addr - -declare void @__quantum__rt__array_update_reference_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) local_unnamed_addr - -declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) local_unnamed_addr - -declare void 
@__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define { i64, i8* }* @Microsoft__Quantum__Qir__Emission__RunAdder__Interop() local_unnamed_addr #0 { -entry: - %0 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() - %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) - %2 = call i8* @__quantum__rt__memory_allocate(i64 %1) - %3 = ptrtoint i8* %2 to i64 - %4 = add i64 %1, -1 - %.not5 = icmp slt i64 %4, 0 - br i1 %.not5, label %exit__1, label %body__1 - -body__1: ; preds = %entry, %body__1 - %5 = phi i64 [ %14, %body__1 ], [ 0, %entry ] - %6 = add i64 %5, %3 - %7 = inttoptr i64 %6 to i8* - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) - %9 = bitcast i8* %8 to %Result** - %10 = load %Result*, %Result** %9, align 8 - %11 = call %Result* @__quantum__rt__result_get_zero() - %12 = call i1 @__quantum__rt__result_equal(%Result* %10, %Result* %11) - %not. = xor i1 %12, true - %13 = sext i1 %not. 
to i8 - store i8 %13, i8* %7, align 1 - %14 = add i64 %5, 1 - %.not = icmp sgt i64 %14, %4 - br i1 %.not, label %exit__1, label %body__1 - -exit__1: ; preds = %body__1, %entry - %15 = call i8* @__quantum__rt__memory_allocate(i64 16) - %16 = bitcast i8* %15 to i64* - store i64 %1, i64* %16, align 4 - %17 = getelementptr i8, i8* %15, i64 8 - %18 = bitcast i8* %17 to i8** - store i8* %2, i8** %18, align 8 - %.not34 = icmp slt i64 %4, 0 - br i1 %.not34, label %exit__2, label %body__2 - -body__2: ; preds = %exit__1, %body__2 - %19 = phi i64 [ %23, %body__2 ], [ 0, %exit__1 ] - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %19) - %21 = bitcast i8* %20 to %Result** - %22 = load %Result*, %Result** %21, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %22, i32 -1) - %23 = add i64 %19, 1 - %.not3 = icmp sgt i64 %23, %4 - br i1 %.not3, label %exit__2, label %body__2 - -exit__2: ; preds = %body__2, %exit__1 - %24 = bitcast i8* %15 to { i64, i8* }* - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) - ret { i64, i8* }* %24 -} - -declare i8* @__quantum__rt__memory_allocate(i64) local_unnamed_addr - -declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr - -define void @Microsoft__Quantum__Qir__Emission__RunAdder() local_unnamed_addr #1 { -entry: - %0 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() - %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) - %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @1, i64 0, i64 0)) - call void @__quantum__rt__string_update_reference_count(%String* %2, i32 1) - %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) - %4 = add i64 %3, -1 - %.not7 = icmp slt i64 %4, 0 - br i1 %.not7, label %exit__1, label %body__1 - -body__1: ; 
preds = %entry, %condContinue__1 - %5 = phi i64 [ %14, %condContinue__1 ], [ 0, %entry ] - %6 = phi %String* [ %13, %condContinue__1 ], [ %2, %entry ] - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) - %8 = bitcast i8* %7 to %Result** - %9 = load %Result*, %Result** %8, align 8 - %.not5 = icmp eq %String* %6, %2 - br i1 %.not5, label %condContinue__1, label %condTrue__1 - -condTrue__1: ; preds = %body__1 - %10 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %1) - call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) - br label %condContinue__1 - -condContinue__1: ; preds = %condTrue__1, %body__1 - %11 = phi %String* [ %10, %condTrue__1 ], [ %6, %body__1 ] - %12 = call %String* @__quantum__rt__result_to_string(%Result* %9) - %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) - call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) - %14 = add i64 %5, 1 - %.not = icmp sgt i64 %14, %4 - br i1 %.not, label %exit__1, label %body__1 - -exit__1: ; preds = %condContinue__1, %entry - %.lcssa = phi %String* [ %2, %entry ], [ %13, %condContinue__1 ] - %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i64 0, i64 0)) - %16 = call %String* @__quantum__rt__string_concatenate(%String* %.lcssa, %String* %15) - call void @__quantum__rt__string_update_reference_count(%String* %.lcssa, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) - call void @__quantum__rt__message(%String* %16) - %.not46 = icmp slt i64 %4, 0 - br i1 %.not46, label %exit__2, label %body__2 - -body__2: ; preds = %exit__1, %body__2 - %17 = phi i64 [ %21, %body__2 
], [ 0, %exit__1 ] - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %17) - %19 = bitcast i8* %18 to %Result** - %20 = load %Result*, %Result** %19, align 8 - call void @__quantum__rt__result_update_reference_count(%Result* %20, i32 -1) - %21 = add i64 %17, 1 - %.not4 = icmp sgt i64 %21, %4 - br i1 %.not4, label %exit__2, label %body__2 - -exit__2: ; preds = %body__2, %exit__1 - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) ->>>>>>> features/llvm-passes - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -<<<<<<< HEAD -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr -======= -declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr - -declare %String* @__quantum__rt__string_concatenate(%String*, %String*) local_unnamed_addr - -declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr ->>>>>>> features/llvm-passes - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } From ecce88fa63036c6b88c87179bf1b6d2b2c3282a0 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 11 Aug 2021 16:21:55 +0200 Subject: [PATCH 075/106] Removing garbage files --- src/Passes/CMakeLists.txt | 2 +- src/Passes/CMakeLists_BACKUP_73868.txt | 60 ------------------- src/Passes/CMakeLists_BACKUP_73960.txt | 60 ------------------- src/Passes/CMakeLists_LOCAL_73868.txt | 52 ---------------- src/Passes/CMakeLists_LOCAL_73960.txt | 52 ---------------- src/Passes/CMakeLists_REMOTE_73868.txt | 48 --------------- src/Passes/CMakeLists_REMOTE_73960.txt | 48 --------------- src/Passes/Source/Apps/Qat/Qat.cpp | 5 +- src/Passes/Source/Llvm/Llvm.hpp | 1 + .../ConstSizeArray_BACKUP_74058.csproj | 20 ------- .../ConstSizeArray_LOCAL_74058.csproj | 13 ---- .../ConstSizeArray_REMOTE_74058.csproj | 9 --- .../examples/QubitAllocationAnalysis/Makefile | 2 +- 13 files changed, 7 insertions(+), 365 deletions(-) delete mode 100644 src/Passes/CMakeLists_BACKUP_73868.txt delete mode 100644 src/Passes/CMakeLists_BACKUP_73960.txt delete mode 100644 src/Passes/CMakeLists_LOCAL_73868.txt delete mode 100644 src/Passes/CMakeLists_LOCAL_73960.txt delete mode 100644 src/Passes/CMakeLists_REMOTE_73868.txt delete mode 100644 src/Passes/CMakeLists_REMOTE_73960.txt delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_BACKUP_74058.csproj delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_LOCAL_74058.csproj delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_REMOTE_74058.csproj diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index 76aed4fe10..f6f3784afb 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -43,7 +43,7 @@ include_directories(${LLVM_INCLUDE_DIRS}) link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/Source) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen 
x86desc x86disassembler x86info interpreter) +llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) # Adding the libraries diff --git a/src/Passes/CMakeLists_BACKUP_73868.txt b/src/Passes/CMakeLists_BACKUP_73868.txt deleted file mode 100644 index 5459a689ea..0000000000 --- a/src/Passes/CMakeLists_BACKUP_73868.txt +++ /dev/null @@ -1,60 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -<<<<<<< HEAD -project(QirPasses) -======= -project(Passes) - -if (WIN32) - message(STATUS "Adding C:\\Program Files\\LLVM\\lib to LLVM paths") - find_package(LLVM REQUIRED PATHS "C:\\Program Files\\LLVM\\lib" CONFIG ) -else() - find_package(LLVM REQUIRED CONFIG) -endif() - ->>>>>>> features/llvm-passes - -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. -set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. 
We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/Source) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) - - -# Adding the libraries -<<<<<<< HEAD -add_subdirectory(Source) -add_subdirectory(tests) - -======= -add_subdirectory(libs) -add_subdirectory(tests) ->>>>>>> features/llvm-passes diff --git a/src/Passes/CMakeLists_BACKUP_73960.txt b/src/Passes/CMakeLists_BACKUP_73960.txt deleted file mode 100644 index 5459a689ea..0000000000 --- a/src/Passes/CMakeLists_BACKUP_73960.txt +++ /dev/null @@ -1,60 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -<<<<<<< HEAD -project(QirPasses) -======= -project(Passes) - -if (WIN32) - message(STATUS "Adding C:\\Program Files\\LLVM\\lib to LLVM paths") - find_package(LLVM REQUIRED PATHS "C:\\Program Files\\LLVM\\lib" CONFIG ) -else() - find_package(LLVM REQUIRED CONFIG) -endif() - ->>>>>>> features/llvm-passes - -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. 
-set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/Source) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) - - -# Adding the libraries -<<<<<<< HEAD -add_subdirectory(Source) -add_subdirectory(tests) - -======= -add_subdirectory(libs) -add_subdirectory(tests) ->>>>>>> features/llvm-passes diff --git a/src/Passes/CMakeLists_LOCAL_73868.txt b/src/Passes/CMakeLists_LOCAL_73868.txt deleted file mode 100644 index 76aed4fe10..0000000000 --- a/src/Passes/CMakeLists_LOCAL_73868.txt +++ /dev/null @@ -1,52 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -project(QirPasses) - -find_package(LLVM REQUIRED CONFIG) -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. 
While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. -set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings -# are triggered if llvm-tutor is built without this flag (though otherwise it -# builds fine). For consistency, add it here too. -check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) -if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. 
We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/Source) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) - - -# Adding the libraries -add_subdirectory(Source) -add_subdirectory(tests) - diff --git a/src/Passes/CMakeLists_LOCAL_73960.txt b/src/Passes/CMakeLists_LOCAL_73960.txt deleted file mode 100644 index 76aed4fe10..0000000000 --- a/src/Passes/CMakeLists_LOCAL_73960.txt +++ /dev/null @@ -1,52 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -project(QirPasses) - -find_package(LLVM REQUIRED CONFIG) -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. -set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings -# are triggered if llvm-tutor is built without this flag (though otherwise it -# builds fine). 
For consistency, add it here too. -check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) -if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/Source) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orcshared orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) - - -# Adding the libraries -add_subdirectory(Source) -add_subdirectory(tests) - diff --git a/src/Passes/CMakeLists_REMOTE_73868.txt b/src/Passes/CMakeLists_REMOTE_73868.txt deleted file mode 100644 index 758830396a..0000000000 --- a/src/Passes/CMakeLists_REMOTE_73868.txt +++ /dev/null @@ -1,48 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -project(Passes) - -if (WIN32) - message(STATUS "Adding C:\\Program Files\\LLVM\\lib to LLVM paths") - find_package(LLVM REQUIRED PATHS "C:\\Program Files\\LLVM\\lib" CONFIG ) -else() - find_package(LLVM REQUIRED CONFIG) -endif() - - -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. 
-set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Adding the libraries -add_subdirectory(libs) -add_subdirectory(tests) \ No newline at end of file diff --git a/src/Passes/CMakeLists_REMOTE_73960.txt b/src/Passes/CMakeLists_REMOTE_73960.txt deleted file mode 100644 index 758830396a..0000000000 --- a/src/Passes/CMakeLists_REMOTE_73960.txt +++ /dev/null @@ -1,48 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -project(Passes) - -if (WIN32) - message(STATUS "Adding C:\\Program Files\\LLVM\\lib to LLVM paths") - find_package(LLVM REQUIRED PATHS "C:\\Program Files\\LLVM\\lib" CONFIG ) -else() - find_package(LLVM REQUIRED CONFIG) -endif() - - -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. 
-set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Adding the libraries -add_subdirectory(libs) -add_subdirectory(tests) \ No newline at end of file diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index 3c851b24ec..c46eee1d66 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -12,10 +12,13 @@ int main(int /*argc*/, char **argv) FunctionPassManager FPM; // InstSimplifyPass is a function pass + FPM.addPass(LoopSimplifyPass()); + // FPM.addPass(LoopUnrollPass()); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - MPM.run(*module); + // MPM.run(*module); // m->print(llvm) llvm::errs() << *module << "\n"; } diff --git a/src/Passes/Source/Llvm/Llvm.hpp b/src/Passes/Source/Llvm/Llvm.hpp index 3611bf8cba..7bf11aa479 100644 --- a/src/Passes/Source/Llvm/Llvm.hpp +++ b/src/Passes/Source/Llvm/Llvm.hpp @@ -48,6 +48,7 @@ // Reader tool #include "llvm/IRReader/IRReader.h" #include "llvm/Support/SourceMgr.h" +#include "llvm/Transforms/Scalar/LoopUnrollPass.h" #if defined(__clang__) #pragma clang diagnostic pop diff --git 
a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_BACKUP_74058.csproj b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_BACKUP_74058.csproj deleted file mode 100644 index 3e7d7a75ea..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_BACKUP_74058.csproj +++ /dev/null @@ -1,20 +0,0 @@ -<<<<<<< HEAD - -======= - ->>>>>>> features/llvm-passes - - - Exe - netcoreapp3.1 - true - - -<<<<<<< HEAD - - - - -======= ->>>>>>> features/llvm-passes - diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_LOCAL_74058.csproj b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_LOCAL_74058.csproj deleted file mode 100644 index ab96e16e33..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_LOCAL_74058.csproj +++ /dev/null @@ -1,13 +0,0 @@ - - - - Exe - netcoreapp3.1 - true - - - - - - - diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_REMOTE_74058.csproj b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_REMOTE_74058.csproj deleted file mode 100644 index eeab572589..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_REMOTE_74058.csproj +++ /dev/null @@ -1,9 +0,0 @@ - - - - Exe - netcoreapp3.1 - true - - - diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index 4924796194..f9937dca44 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -10,7 +10,7 @@ run-replace: build-ir build-qaa build-esa analysis-example.ll # opt -loop-unroll -unroll-count=3 -unroll-allow-partial opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib 
--passes="expand-static-allocation" -S analysis-example.ll > test1.ll - opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="mem2reg,simplifycfg,loop-simplify,loop-unroll,restrict-qir" -S test1.ll > test2.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="loop-simplify,loop-unroll,restrict-qir" -S test1.ll > test2.ll opt --passes="inline" -S test2.ll | opt -O1 -S From 85e76b63ce8549fd91dfd6d4e57deb482b51f7da Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 11 Aug 2021 19:40:20 +0200 Subject: [PATCH 076/106] Creating prototype QAT --- src/Passes/CMakeLists.txt | 2 +- src/Passes/Source/Apps/CMakeLists.txt | 1 + src/Passes/Source/Apps/Qat/Qat.cpp | 53 +++++++++++++++++++++++---- src/Passes/Source/Llvm/Llvm.hpp | 1 + 4 files changed, 48 insertions(+), 9 deletions(-) diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index f6f3784afb..a2a1074141 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -43,7 +43,7 @@ include_directories(${LLVM_INCLUDE_DIRS}) link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/Source) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit orctargetprocess x86asmparser x86codegen x86desc x86disassembler x86info interpreter) +llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit x86asmparser x86codegen x86desc x86disassembler x86info interpreter) # Adding the libraries diff --git a/src/Passes/Source/Apps/CMakeLists.txt b/src/Passes/Source/Apps/CMakeLists.txt index 3d4d5dcc03..60218d1de9 100644 --- a/src/Passes/Source/Apps/CMakeLists.txt +++ b/src/Passes/Source/Apps/CMakeLists.txt @@ -1,3 +1,4 @@ add_executable(qat Qat/Qat.cpp) target_link_libraries(qat ${llvm_libs}) +target_link_libraries(qat TransformationRule Rules AllocationManager) \ No newline at end of file diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp 
b/src/Passes/Source/Apps/Qat/Qat.cpp index c46eee1d66..f77dc64391 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -1,6 +1,10 @@ #include "Llvm/Llvm.hpp" +#include "Passes/TransformationRule/TransformationRule.hpp" +#include "Rules/Factory.hpp" using namespace llvm; +using namespace microsoft::quantum; + int main(int /*argc*/, char **argv) { LLVMContext context; @@ -8,18 +12,51 @@ int main(int /*argc*/, char **argv) auto module = parseIRFile(argv[1], error, context); if (module) { - ModulePassManager MPM; - FunctionPassManager FPM; - // InstSimplifyPass is a function pass + llvm::PassBuilder passBuilder; + llvm::LoopAnalysisManager loopAnalysisManager(true); // true is just to output debug info + llvm::FunctionAnalysisManager functionAnalysisManager(true); + llvm::CGSCCAnalysisManager cGSCCAnalysisManager(true); + llvm::ModuleAnalysisManager moduleAnalysisManager(true); + + passBuilder.registerModuleAnalyses(moduleAnalysisManager); + passBuilder.registerCGSCCAnalyses(cGSCCAnalysisManager); + passBuilder.registerFunctionAnalyses(functionAnalysisManager); + passBuilder.registerLoopAnalyses(loopAnalysisManager); + // This is the important line: + passBuilder.crossRegisterProxies(loopAnalysisManager, functionAnalysisManager, + cGSCCAnalysisManager, moduleAnalysisManager); + + auto functionPassManager = passBuilder.buildFunctionSimplificationPipeline( + llvm::PassBuilder::OptimizationLevel::O1, llvm::PassBuilder::ThinLTOPhase::None, true); + + RuleSet rule_set; + + // Defining the mapping + auto factory = RuleFactory(rule_set); + + factory.useStaticQuantumArrayAllocation(); + factory.useStaticQuantumAllocation(); + factory.useStaticResultAllocation(); + + factory.optimiseBranchQuatumOne(); + // factory.optimiseBranchQuatumZero(); + + factory.disableReferenceCounting(); + factory.disableAliasCounting(); + factory.disableStringSupport(); + + functionPassManager.addPass(TransformationRulePass(std::move(rule_set))); + + 
llvm::ModulePassManager modulePassManager = + passBuilder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); - FPM.addPass(LoopSimplifyPass()); - // FPM.addPass(LoopUnrollPass()); + // https://llvm.org/docs/NewPassManager.html + // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); + // InlinerPass() - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + modulePassManager.run(*module, moduleAnalysisManager); - // MPM.run(*module); - // m->print(llvm) llvm::errs() << *module << "\n"; } diff --git a/src/Passes/Source/Llvm/Llvm.hpp b/src/Passes/Source/Llvm/Llvm.hpp index 7bf11aa479..4b5a2ab4b5 100644 --- a/src/Passes/Source/Llvm/Llvm.hpp +++ b/src/Passes/Source/Llvm/Llvm.hpp @@ -48,6 +48,7 @@ // Reader tool #include "llvm/IRReader/IRReader.h" #include "llvm/Support/SourceMgr.h" +#include "llvm/Transforms/IPO/Inliner.h" #include "llvm/Transforms/Scalar/LoopUnrollPass.h" #if defined(__clang__) From c1cebb99b93ea2c0d28974daae229773f8bb67e0 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 12 Aug 2021 10:45:18 +0200 Subject: [PATCH 077/106] Preparing profile directory --- src/Passes/CMakeLists_BASE_73868.txt | 48 ---- src/Passes/CMakeLists_BASE_73960.txt | 48 ---- src/Passes/README.md | 248 ++++++++---------- src/Passes/Source/Apps/CMakeLists.txt | 2 +- src/Passes/Source/Apps/Qat/Qat.cpp | 113 +++++--- src/Passes/Source/CMakeLists.txt | 36 +++ .../Source/Commandline/ParameterParser.cpp | 94 +++++++ .../Source/Commandline/ParameterParser.hpp | 43 +++ src/Passes/Source/Commandline/Settings.hpp | 63 +++++ .../Passes/OpsCounter/LibOpsCounter.cpp | 47 ---- .../Source/Passes/OpsCounter/OpsCounter.cpp | 82 ------ .../Source/Passes/OpsCounter/OpsCounter.hpp | 67 ----- src/Passes/Source/Profiles/IProfile.cpp | 9 + src/Passes/Source/Profiles/IProfile.hpp | 19 ++ 14 files changed, 446 insertions(+), 473 deletions(-) delete mode 100644 src/Passes/CMakeLists_BASE_73868.txt delete mode 100644 src/Passes/CMakeLists_BASE_73960.txt create mode 100644 src/Passes/Source/Commandline/ParameterParser.cpp create mode 100644 src/Passes/Source/Commandline/ParameterParser.hpp create mode 100644 src/Passes/Source/Commandline/Settings.hpp delete mode 100644 src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp delete mode 100644 src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp delete mode 100644 src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp create mode 100644 src/Passes/Source/Profiles/IProfile.cpp create mode 100644 src/Passes/Source/Profiles/IProfile.hpp diff --git a/src/Passes/CMakeLists_BASE_73868.txt b/src/Passes/CMakeLists_BASE_73868.txt deleted file mode 100644 index 41ca853b39..0000000000 --- a/src/Passes/CMakeLists_BASE_73868.txt +++ /dev/null @@ -1,48 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -project(QSharpPasses) - -find_package(LLVM REQUIRED CONFIG) -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration 
for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. -set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings -# are triggered if llvm-tutor is built without this flag (though otherwise it -# builds fine). For consistency, add it here too. -check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) -if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. 
We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Adding the libraries -add_subdirectory(libs) diff --git a/src/Passes/CMakeLists_BASE_73960.txt b/src/Passes/CMakeLists_BASE_73960.txt deleted file mode 100644 index 41ca853b39..0000000000 --- a/src/Passes/CMakeLists_BASE_73960.txt +++ /dev/null @@ -1,48 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -project(QSharpPasses) - -find_package(LLVM REQUIRED CONFIG) -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. -set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings -# are triggered if llvm-tutor is built without this flag (though otherwise it -# builds fine). For consistency, add it here too. 
-check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) -if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Adding the libraries -add_subdirectory(libs) diff --git a/src/Passes/README.md b/src/Passes/README.md index b6afba93b5..b014a9edbf 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -1,191 +1,153 @@ -# QIR Passes for LLVM +# Profile adoption tool -This library defines [LLVM passes](https://llvm.org/docs/Passes.html) used for analysing, optimising and transforming the IR. The QIR pass library is a dynamic library that can be compiled and ran separately from the -rest of the project code. While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the [QIR specification](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR). +# Getting started -## What do LLVM passes do? +## Dependencies -Before getting started, we here provide a few examples of classical use cases for [LLVM passes](https://llvm.org/docs/Passes.html). You find additional [instructive examples here][1]. +This library is written in C++ and depends on: -**Example 1: Transformation**. As a first example of what [LLVM passes](https://llvm.org/docs/Passes.html) can do, we look at optimisation. 
Consider a compiler which -compiles +- LLVM -```c -double test(double x) { - return (1+2+x)*(x+(1+2)); -} -``` +Additional development dependencies include: -into following IR: +- CMake +- clang-format +- clang-tidy -``` -define double @test(double %x) { -entry: - %addtmp = fadd double 3.000000e+00, %x - %addtmp1 = fadd double %x, 3.000000e+00 - %multmp = fmul double %addtmp, %addtmp1 - ret double %multmp -} -``` +## Building the passes -This code is obviously inefficient as we could get rid of one operation by rewritting the code to: +To build the passes, create a new build directory and switch to that directory: -```c -double test(double x) { - double y = 3+x; - return y * y; -} +```sh +mkdir Debug +cd Debug/ ``` -One purpose of [LLVM passes](https://llvm.org/docs/Passes.html) is to allow automatic transformation from the above IR to the IR: +To build the library, first configure CMake from the build directory +```sh +cmake .. ``` -define double @test(double %x) { -entry: - %addtmp = fadd double %x, 3.000000e+00 - %multmp = fmul double %addtmp, %addtmp - ret double %multmp -} + +and then make your target + +```sh +make [target] ``` -**Example 2: Analytics**. Another example of useful passes are those generating and collecting statistics about the program. For instance, one analytics program -makes sense for classical programs is to count instructions used to implement functions. Take the C program: +The default target is `all`. Other valid targets are the name of the folders in `libs/` found in the passes root. 
-```c -int foo(int x) -{ - return x; -} +# Profile adoption tool -void bar(int x, int y) -{ - foo(x + y); -} +## Building QAT -int main() -{ - foo(2); - bar(3, 2); +First - return 0; -} +```sh +cd Debug +make qat ``` -which produces follow IR (without optimisation): - -```language -define dso_local i32 @foo(i32 %0) #0 { - %2 = alloca i32, align 4 - store i32 %0, i32* %2, align 4 - %3 = load i32, i32* %2, align 4 - ret i32 %3 -} - -define dso_local void @bar(i32 %0, i32 %1) #0 { - %3 = alloca i32, align 4 - %4 = alloca i32, align 4 - store i32 %0, i32* %3, align 4 - store i32 %1, i32* %4, align 4 - %5 = load i32, i32* %3, align 4 - %6 = load i32, i32* %4, align 4 - %7 = add nsw i32 %5, %6 - %8 = call i32 @foo(i32 %7) - ret void -} - -define dso_local i32 @main() #0 { - %1 = alloca i32, align 4 - store i32 0, i32* %1, align 4 - %2 = call i32 @foo(i32 2) - call void @bar(i32 3, i32 2) - ret i32 0 -} -``` +then -A stat pass for this code, would collect following statisics: - -```text -Stats for 'foo' -=========================== -Opcode # Used ---------------------------- -load 1 -ret 1 -alloca 1 -store 1 ---------------------------- - -Stats for 'bar' -=========================== -Opcode # Used ---------------------------- -load 2 -add 1 -ret 1 -alloca 2 -store 2 -call 1 ---------------------------- - -Stats for 'main' -=========================== -Opcode # Used ---------------------------- -ret 1 -alloca 1 -store 1 -call 2 ---------------------------- +```sh +./Source/Apps/qat ``` -**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on [static arrays][2]. -Note that this is a non-standard usecase as such analysis is usually made using the AST rather than at the IR level. 
+## Implementing a profile pass -**References** +As an example of how one can implement a new profile pass, we here show the implementational details of our example pass which allows mapping the teleportation code to the base profile: -- [1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass -- [2] https://github.com/victor-fdez/llvm-array-check-pass +```c++ + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + // Base profile + if (name == "restrict-qir") + { + RuleSet rule_set; -## Out-of-source Pass + // Defining the mapping + auto factory = RuleFactory(rule_set); -This library is build as set of out-of-source-passes. All this means is that we will not be downloading the LLVM repository and modifying this repository directly. You can read more [here](https://llvm.org/docs/CMake.html#cmake-out-of-source-pass). + factory.useStaticQuantumArrayAllocation(); + factory.useStaticQuantumAllocation(); + factory.useStaticResultAllocation(); -# Getting started + factory.optimiseBranchQuatumOne(); + // factory.optimiseBranchQuatumZero(); -## Dependencies + factory.disableReferenceCounting(); + factory.disableAliasCounting(); + factory.disableStringSupport(); -This library is written in C++ and depends on: + fpm.addPass(TransformationRulePass(std::move(rule_set))); + return true; + } -- LLVM + return false; + }); + }}; +``` -Additional development dependencies include: +Transformations of the IR will happen on the basis of what rules are added to the rule set. The purpose of the factory is to make easy to add rules that serve a single purpose as well as making a basis for making rules unit testable. -- CMake -- clang-format -- clang-tidy +## Implementing new rules -## Building the passes +Implementing new rules consists of two steps: Defining a pattern that one wish to replace and implementing the corresponding replacement logic. 
Inside a factory member function, this look as follows: -To build the passes, create a new build directory and switch to that directory: +```c++ + auto get_element = + Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); + auto cast_pattern = BitCast("getElement"_cap = get_element); + auto load_pattern = Load("cast"_cap = cast_pattern); -```sh -mkdir Debug -cd Debug/ + addRule({std::move(load_pattern), access_replacer}); ``` -To build the library, first configure CMake from the build directory +where `addRule` adds the rule to the current rule set. -```sh -cmake .. +### Capturing patterns + +The pattern defined in this snippet matches IR like: + +```c++ + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %2 = load %Qubit*, %Qubit** %1, align 8 ``` -and then make your target +In the above rule, the first and a second argument of `__quantum__rt__array_get_element_ptr_1d` is captured as `arrayName` and `index`, respectively. Likewise, the bitcast instruction is captured as `cast`. Each of these captures will be available inside the replacement function `access_replacer`. -```sh -make [target] +### Implementing replacement logic + +After a positive match is found, the lead instruction alongside a IRBuilder, a capture table and a replacement table is passed to the replacement function. Here is an example on how one can access the captured variables to perform a transformation of the IR: + +```c++ + auto access_replacer = [qubit_alloc_manager](Builder &builder, Value *val, Captures &cap, + Replacements &replacements) { + // ... + auto cst = llvm::dyn_cast(cap["index"]); + // ... 
+ auto llvm_size = cst->getValue(); + auto offset = qubit_alloc_manager->getOffset(cap["arrayName"]->getName().str()); + + auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the lead instruction with a the new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + // Deleting the getelement and cast operations + replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); + replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); + + return true; + }; ``` -The default target is `all`. Other valid targets are the name of the folders in `libs/` found in the passes root. +# Passes ## Running a pass diff --git a/src/Passes/Source/Apps/CMakeLists.txt b/src/Passes/Source/Apps/CMakeLists.txt index 60218d1de9..500c06c1d4 100644 --- a/src/Passes/Source/Apps/CMakeLists.txt +++ b/src/Passes/Source/Apps/CMakeLists.txt @@ -1,4 +1,4 @@ add_executable(qat Qat/Qat.cpp) target_link_libraries(qat ${llvm_libs}) -target_link_libraries(qat TransformationRule Rules AllocationManager) \ No newline at end of file +target_link_libraries(qat TransformationRule Rules AllocationManager Commandline Profiles) \ No newline at end of file diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index f77dc64391..7c9b2362ae 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -1,60 +1,99 @@ +#include "Commandline/ParameterParser.hpp" +#include "Commandline/Settings.hpp" #include "Llvm/Llvm.hpp" #include "Passes/TransformationRule/TransformationRule.hpp" +#include "Profiles/IProfile.hpp" #include "Rules/Factory.hpp" +#include +#include +#include + using namespace llvm; using namespace microsoft::quantum; -int main(int /*argc*/, char **argv) +class BaseProfile : public IProfile { - LLVMContext 
context; - SMDiagnostic error; - auto module = parseIRFile(argv[1], error, context); - if (module) - { +public: + llvm::ModulePassManager createGenerationModulePass( + llvm::PassBuilder & pass_builder, + llvm::PassBuilder::OptimizationLevel &optimisation_level) override; +}; + +llvm::ModulePassManager BaseProfile::createGenerationModulePass( + llvm::PassBuilder &pass_builder, llvm::PassBuilder::OptimizationLevel &optimisation_level) +{ + auto functionPassManager = pass_builder.buildFunctionSimplificationPipeline( + optimisation_level, llvm::PassBuilder::ThinLTOPhase::None, true); - llvm::PassBuilder passBuilder; - llvm::LoopAnalysisManager loopAnalysisManager(true); // true is just to output debug info - llvm::FunctionAnalysisManager functionAnalysisManager(true); - llvm::CGSCCAnalysisManager cGSCCAnalysisManager(true); - llvm::ModuleAnalysisManager moduleAnalysisManager(true); + RuleSet rule_set; - passBuilder.registerModuleAnalyses(moduleAnalysisManager); - passBuilder.registerCGSCCAnalyses(cGSCCAnalysisManager); - passBuilder.registerFunctionAnalyses(functionAnalysisManager); - passBuilder.registerLoopAnalyses(loopAnalysisManager); - // This is the important line: - passBuilder.crossRegisterProxies(loopAnalysisManager, functionAnalysisManager, - cGSCCAnalysisManager, moduleAnalysisManager); + // Defining the mapping + auto factory = RuleFactory(rule_set); - auto functionPassManager = passBuilder.buildFunctionSimplificationPipeline( - llvm::PassBuilder::OptimizationLevel::O1, llvm::PassBuilder::ThinLTOPhase::None, true); + factory.useStaticQuantumArrayAllocation(); + factory.useStaticQuantumAllocation(); + factory.useStaticResultAllocation(); - RuleSet rule_set; + factory.optimiseBranchQuatumOne(); + // factory.optimiseBranchQuatumZero(); - // Defining the mapping - auto factory = RuleFactory(rule_set); + factory.disableReferenceCounting(); + factory.disableAliasCounting(); + factory.disableStringSupport(); - factory.useStaticQuantumArrayAllocation(); - 
factory.useStaticQuantumAllocation(); - factory.useStaticResultAllocation(); + functionPassManager.addPass(TransformationRulePass(std::move(rule_set))); - factory.optimiseBranchQuatumOne(); - // factory.optimiseBranchQuatumZero(); + // https://llvm.org/docs/NewPassManager.html + // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); + // InlinerPass() - factory.disableReferenceCounting(); - factory.disableAliasCounting(); - factory.disableStringSupport(); + return pass_builder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); +} - functionPassManager.addPass(TransformationRulePass(std::move(rule_set))); +int main(int argc, char **argv) +{ + Settings settings{{ + {"debug", "false"}, + {"profile", "qir"}, + }}; - llvm::ModulePassManager modulePassManager = - passBuilder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); + ParameterParser parser(settings); + parser.addFlag("debug"); + parser.parseArgs(argc, argv); + settings.print(); - // https://llvm.org/docs/NewPassManager.html - // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); - // InlinerPass() + if (parser.arguments().empty()) + { + std::cerr << "usage: " << argv[0] << " [options] filename" << std::endl; + exit(-1); + } + LLVMContext context; + SMDiagnostic error; + auto module = parseIRFile(parser.getArg(0), error, context); + if (module) + { + bool debug = settings.get("debug") == "true"; + auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; + BaseProfile profile; + + // Creating pass builder + llvm::PassBuilder pass_builder; + llvm::LoopAnalysisManager loopAnalysisManager(debug); + llvm::FunctionAnalysisManager functionAnalysisManager(debug); + llvm::CGSCCAnalysisManager cGSCCAnalysisManager(debug); + llvm::ModuleAnalysisManager moduleAnalysisManager(debug); + + pass_builder.registerModuleAnalyses(moduleAnalysisManager); + pass_builder.registerCGSCCAnalyses(cGSCCAnalysisManager); + 
pass_builder.registerFunctionAnalyses(functionAnalysisManager); + pass_builder.registerLoopAnalyses(loopAnalysisManager); + + pass_builder.crossRegisterProxies(loopAnalysisManager, functionAnalysisManager, + cGSCCAnalysisManager, moduleAnalysisManager); + + auto modulePassManager = profile.createGenerationModulePass(pass_builder, optimisation_level); modulePassManager.run(*module, moduleAnalysisManager); llvm::errs() << *module << "\n"; diff --git a/src/Passes/Source/CMakeLists.txt b/src/Passes/Source/CMakeLists.txt index 0fe06f4d51..bee8473e9a 100644 --- a/src/Passes/Source/CMakeLists.txt +++ b/src/Passes/Source/CMakeLists.txt @@ -1,5 +1,21 @@ cmake_minimum_required(VERSION 3.4.3) +# Creating Allocation Manager library +file(GLOB COMMANDLINE_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Commandline/*.cpp) +add_library(Commandline + SHARED + ${COMMANDLINE_SRC}) + +target_include_directories( + Commandline + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/include" +) + +target_link_libraries(Commandline + "$<$:-undefined dynamic_lookup>") + + # Creating Allocation Manager library file(GLOB ALLOCATION_MGR_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/AllocationManager/*.cpp) add_library(AllocationManager @@ -36,5 +52,25 @@ target_link_libraries(Rules # Creating all of the passes library add_subdirectory(Passes) + + + +# Creating Allocation Manager library +file(GLOB PROFILE_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Profiles/*.cpp) +add_library(Profiles + SHARED + ${PROFILE_SOURCE}) + +target_include_directories( + Profiles + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/include" +) + +target_link_libraries(Profiles + "$<$:-undefined dynamic_lookup>") + + + # Creating all commandline apps add_subdirectory(Apps) \ No newline at end of file diff --git a/src/Passes/Source/Commandline/ParameterParser.cpp b/src/Passes/Source/Commandline/ParameterParser.cpp new file mode 100644 index 0000000000..d559313baa --- 
/dev/null +++ b/src/Passes/Source/Commandline/ParameterParser.cpp @@ -0,0 +1,94 @@ +#include "Commandline/ParameterParser.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { + +ParameterParser::ParameterParser(Settings &settings) + : settings_{settings} +{} + +void ParameterParser::parseArgs(int argc, char **argv) +{ + uint64_t i = 1; + std::vector values; + while (i < static_cast(argc)) + { + values.push_back(parseSingleArg(argv[i])); + ++i; + } + + i = 0; + while (i < values.size()) + { + auto &v = values[i]; + ++i; + + if (!v.is_key) + { + arguments_.push_back(v.value); + continue; + } + + if (i >= values.size()) + { + settings_[v.value] = "true"; + continue; + } + + auto &v2 = values[i]; + if (!v2.is_key && hasValue(v.value)) + { + settings_[v.value] = v2.value; + ++i; + continue; + } + + settings_[v.value] = "true"; + } +} + +void ParameterParser::addFlag(String const &v) +{ + flags_.insert(v); +} + +ParameterParser::Arguments const &ParameterParser::arguments() const +{ + return arguments_; +} +ParameterParser::String const &ParameterParser::getArg(uint64_t const &n) +{ + return arguments_[n]; +} + +ParameterParser::ParsedValue ParameterParser::parseSingleArg(String key) +{ + bool is_key = false; + if (key.size() > 2 && key.substr(0, 2) == "--") + { + is_key = true; + key = key.substr(2); + } + else if (key.size() > 1 && key.substr(0, 1) == "-") + { + is_key = true; + key = key.substr(1); + } + return {is_key, key}; +} + +bool ParameterParser::hasValue(String const &key) +{ + if (flags_.find(key) != flags_.end()) + { + return false; + } + + return true; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Commandline/ParameterParser.hpp b/src/Passes/Source/Commandline/ParameterParser.hpp new file mode 100644 index 0000000000..c4d78e4d34 --- /dev/null +++ b/src/Passes/Source/Commandline/ParameterParser.hpp @@ -0,0 +1,43 @@ +#pragma once +#include "Commandline/Settings.hpp" + +#include +#include +#include 
+#include + +namespace microsoft { +namespace quantum { + +class ParameterParser +{ +public: + using String = std::string; + using Arguments = std::vector; + using Flags = std::unordered_set; + + struct ParsedValue + { + bool is_key{false}; + String value; + }; + + ParameterParser(Settings &settings); + + void parseArgs(int argc, char **argv); + void addFlag(String const &v); + Arguments const &arguments() const; + String const & getArg(uint64_t const &n); + +private: + ParsedValue parseSingleArg(String key); + + bool hasValue(String const &key); + + Settings &settings_; + Arguments arguments_{}; + Flags flags_{}; +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Commandline/Settings.hpp b/src/Passes/Source/Commandline/Settings.hpp new file mode 100644 index 0000000000..4c4bc6fdd6 --- /dev/null +++ b/src/Passes/Source/Commandline/Settings.hpp @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include +#include + +namespace microsoft { +namespace quantum { + +class Settings +{ +public: + using String = std::string; + + using SettingsMap = std::unordered_map; + Settings(SettingsMap default_settings) + : settings_{default_settings} + {} + + String get(String const &name, String const &default_value) + { + auto it = settings_.find(name); + if (it == settings_.end()) + { + return default_value; + } + + return it->second; + } + + String get(String const &name) + { + auto it = settings_.find(name); + if (it == settings_.end()) + { + throw std::runtime_error("Could not find setting '" + name + "'."); + } + + return it->second; + } + + void print() + { + std::cout << "Settings" << std::endl; + for (auto &s : settings_) + { + std::cout << std::setw(20) << s.first << ": " << s.second << std::endl; + } + } + + String &operator[](String const &key) + { + return settings_[key]; + } + +private: + SettingsMap settings_; + friend class ParameterParser; +}; + +} // namespace quantum +} // namespace microsoft diff --git 
a/src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp b/src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp deleted file mode 100644 index d3de240e11..0000000000 --- a/src/Passes/Source/Passes/OpsCounter/LibOpsCounter.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -#include "Llvm/Llvm.hpp" -#include "Passes/OpsCounter/OpsCounter.hpp" - -#include -#include - -namespace { -// Interface to plugin -llvm::PassPluginLibraryInfo getOpsCounterPluginInfo() -{ - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the printer - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "print") - { - fpm.addPass(OpsCounterPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(OpsCounterPrinter(llvm::errs())); - }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - fam.registerPass([] { return OpsCounterAnalytics(); }); - }); - }}; -} - -} // namespace - -extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() -{ - return getOpsCounterPluginInfo(); -} diff --git a/src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp b/src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp deleted file mode 100644 index 154368bf21..0000000000 --- a/src/Passes/Source/Passes/OpsCounter/OpsCounter.cpp +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -#include "Passes/OpsCounter/OpsCounter.hpp" - -#include "Llvm/Llvm.hpp" - -#include -#include - -namespace microsoft { -namespace quantum { -OpsCounterAnalytics::Result OpsCounterAnalytics::run(llvm::Function &function, - llvm::FunctionAnalysisManager & /*unused*/) -{ - OpsCounterAnalytics::Result opcode_map; - for (auto &basic_block : function) - { - for (auto &instruction : basic_block) - { - /* - TODO(tfr): Enbale in LLVM 12 or later - if (instruction.isDebugOrPseudoInst()) - { - continue; - } - */ - - auto name = instruction.getOpcodeName(); - - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; - } - } - } - - return opcode_map; -} - -OpsCounterPrinter::OpsCounterPrinter(llvm::raw_ostream &out_stream) - : out_stream_(out_stream) -{} - -llvm::PreservedAnalyses OpsCounterPrinter::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) -{ - auto &opcode_map = fam.getResult(function); - - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; - - constexpr auto STR1 = "Opcode"; - constexpr auto STR2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", STR1, STR2); - out_stream_ << "---------------------------" - << "\n"; - - for (auto const &instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), - instruction.second); - } - out_stream_ << "---------------------------" - << "\n\n"; - - return llvm::PreservedAnalyses::all(); -} - -bool OpsCounterPrinter::isRequired() -{ - return true; -} - -llvm::AnalysisKey OpsCounterAnalytics::Key; - -} // namespace quantum -} // namespace microsoft diff --git a/src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp b/src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp deleted file mode 100644 index b385d0cb01..0000000000 --- a/src/Passes/Source/Passes/OpsCounter/OpsCounter.hpp +++ /dev/null @@ 
-1,67 +0,0 @@ -#pragma once -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -#include "Llvm/Llvm.hpp" - -namespace microsoft { -namespace quantum { - -class OpsCounterAnalytics : public llvm::AnalysisInfoMixin -{ -public: - using Result = llvm::StringMap; - - /// Constructors and destructors - /// @{ - OpsCounterAnalytics() = default; - OpsCounterAnalytics(OpsCounterAnalytics const &) = delete; - OpsCounterAnalytics(OpsCounterAnalytics &&) = default; - ~OpsCounterAnalytics() = default; - /// @} - - /// Operators - /// @{ - OpsCounterAnalytics &operator=(OpsCounterAnalytics const &) = delete; - OpsCounterAnalytics &operator=(OpsCounterAnalytics &&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); - /// @} - -private: - static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; -}; - -class OpsCounterPrinter : public llvm::PassInfoMixin -{ -public: - /// Constructors and destructors - /// @{ - explicit OpsCounterPrinter(llvm::raw_ostream &out_stream); - OpsCounterPrinter() = delete; - OpsCounterPrinter(OpsCounterPrinter const &) = delete; - OpsCounterPrinter(OpsCounterPrinter &&) = default; - ~OpsCounterPrinter() = default; - /// @} - - /// Operators - /// @{ - OpsCounterPrinter &operator=(OpsCounterPrinter const &) = delete; - OpsCounterPrinter &operator=(OpsCounterPrinter &&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} -private: - llvm::raw_ostream &out_stream_; -}; - -} // namespace quantum -} // namespace microsoft diff --git a/src/Passes/Source/Profiles/IProfile.cpp b/src/Passes/Source/Profiles/IProfile.cpp new file mode 100644 index 0000000000..f9e2370ae5 --- /dev/null +++ b/src/Passes/Source/Profiles/IProfile.cpp @@ -0,0 +1,9 @@ +#include "Profiles/IProfile.hpp" + 
+namespace microsoft { +namespace quantum { + +IProfile::~IProfile() = default; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Profiles/IProfile.hpp b/src/Passes/Source/Profiles/IProfile.hpp new file mode 100644 index 0000000000..c8e973a932 --- /dev/null +++ b/src/Passes/Source/Profiles/IProfile.hpp @@ -0,0 +1,19 @@ +#pragma once + +#include "Llvm/Llvm.hpp" + +namespace microsoft { +namespace quantum { + +class IProfile +{ +public: + IProfile() = default; + virtual ~IProfile(); + virtual llvm::ModulePassManager createGenerationModulePass( + llvm::PassBuilder & pass_builder, + llvm::PassBuilder::OptimizationLevel &optimisation_level) = 0; +}; + +} // namespace quantum +} // namespace microsoft From c0cb5c01ca7ef9cd9c34b771a68aaba677496876 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 12 Aug 2021 11:12:12 +0200 Subject: [PATCH 078/106] Adding profile interface --- src/Passes/Source/Apps/Qat/Qat.cpp | 93 +++++++++++----------- src/Passes/Source/Profiles/BaseProfile.cpp | 48 +++++++++++ src/Passes/Source/Profiles/BaseProfile.hpp | 17 ++++ src/Passes/Source/Profiles/IProfile.hpp | 8 +- 4 files changed, 116 insertions(+), 50 deletions(-) create mode 100644 src/Passes/Source/Profiles/BaseProfile.cpp create mode 100644 src/Passes/Source/Profiles/BaseProfile.hpp diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index 7c9b2362ae..a1e4f762a2 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -1,9 +1,8 @@ #include "Commandline/ParameterParser.hpp" #include "Commandline/Settings.hpp" #include "Llvm/Llvm.hpp" -#include "Passes/TransformationRule/TransformationRule.hpp" +#include "Profiles/BaseProfile.hpp" #include "Profiles/IProfile.hpp" -#include "Rules/Factory.hpp" #include #include @@ -12,72 +11,49 @@ using namespace llvm; using namespace microsoft::quantum; -class BaseProfile : public IProfile -{ -public: - llvm::ModulePassManager 
createGenerationModulePass( - llvm::PassBuilder & pass_builder, - llvm::PassBuilder::OptimizationLevel &optimisation_level) override; -}; - -llvm::ModulePassManager BaseProfile::createGenerationModulePass( - llvm::PassBuilder &pass_builder, llvm::PassBuilder::OptimizationLevel &optimisation_level) -{ - auto functionPassManager = pass_builder.buildFunctionSimplificationPipeline( - optimisation_level, llvm::PassBuilder::ThinLTOPhase::None, true); - - RuleSet rule_set; - - // Defining the mapping - auto factory = RuleFactory(rule_set); - - factory.useStaticQuantumArrayAllocation(); - factory.useStaticQuantumAllocation(); - factory.useStaticResultAllocation(); - - factory.optimiseBranchQuatumOne(); - // factory.optimiseBranchQuatumZero(); - - factory.disableReferenceCounting(); - factory.disableAliasCounting(); - factory.disableStringSupport(); - - functionPassManager.addPass(TransformationRulePass(std::move(rule_set))); - - // https://llvm.org/docs/NewPassManager.html - // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); - // InlinerPass() - - return pass_builder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); -} - int main(int argc, char **argv) { + // Parsing commmandline arguments Settings settings{{ {"debug", "false"}, - {"profile", "qir"}, + {"generate", "false"}, + {"validate", "false"}, + {"profile", "base-profile"}, }}; ParameterParser parser(settings); parser.addFlag("debug"); + parser.addFlag("generate"); + parser.addFlag("validate"); + parser.parseArgs(argc, argv); - settings.print(); if (parser.arguments().empty()) { - std::cerr << "usage: " << argv[0] << " [options] filename" << std::endl; + std::cerr << "Usage: " << argv[0] << " [options] filename" << std::endl; exit(-1); } + // Loading IR LLVMContext context; SMDiagnostic error; auto module = parseIRFile(parser.getArg(0), error, context); - if (module) + + if (!module) { - bool debug = settings.get("debug") == "true"; - auto optimisation_level = 
llvm::PassBuilder::OptimizationLevel::O1; - BaseProfile profile; + std::cerr << "Invalid IR." << std::endl; + exit(-1); + } + + // Generating IR + bool debug = settings.get("debug") == "true"; + bool generate = settings.get("generate") == "true"; + bool validate = settings.get("validate") == "true"; + auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; + BaseProfile profile; + if (generate) + { // Creating pass builder llvm::PassBuilder pass_builder; llvm::LoopAnalysisManager loopAnalysisManager(debug); @@ -99,5 +75,26 @@ int main(int argc, char **argv) llvm::errs() << *module << "\n"; } + if (validate) + { + // Creating pass builder + llvm::PassBuilder pass_builder; + llvm::LoopAnalysisManager loopAnalysisManager(debug); + llvm::FunctionAnalysisManager functionAnalysisManager(debug); + llvm::CGSCCAnalysisManager cGSCCAnalysisManager(debug); + llvm::ModuleAnalysisManager moduleAnalysisManager(debug); + + pass_builder.registerModuleAnalyses(moduleAnalysisManager); + pass_builder.registerCGSCCAnalyses(cGSCCAnalysisManager); + pass_builder.registerFunctionAnalyses(functionAnalysisManager); + pass_builder.registerLoopAnalyses(loopAnalysisManager); + + pass_builder.crossRegisterProxies(loopAnalysisManager, functionAnalysisManager, + cGSCCAnalysisManager, moduleAnalysisManager); + + auto modulePassManager = profile.createValidationModulePass(pass_builder, optimisation_level); + modulePassManager.run(*module, moduleAnalysisManager); + } + return 0; } diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp new file mode 100644 index 0000000000..24292cd94b --- /dev/null +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -0,0 +1,48 @@ +#include "Profiles/BaseProfile.hpp" + +#include "Llvm/Llvm.hpp" +#include "Passes/TransformationRule/TransformationRule.hpp" +#include "Rules/Factory.hpp" + +namespace microsoft { +namespace quantum { + +llvm::ModulePassManager BaseProfile::createGenerationModulePass( + 
llvm::PassBuilder &pass_builder, llvm::PassBuilder::OptimizationLevel &optimisation_level) +{ + auto functionPassManager = pass_builder.buildFunctionSimplificationPipeline( + optimisation_level, llvm::PassBuilder::ThinLTOPhase::None, true); + + RuleSet rule_set; + + // Defining the mapping + auto factory = RuleFactory(rule_set); + + factory.useStaticQuantumArrayAllocation(); + factory.useStaticQuantumAllocation(); + factory.useStaticResultAllocation(); + + factory.optimiseBranchQuatumOne(); + // factory.optimiseBranchQuatumZero(); + + factory.disableReferenceCounting(); + factory.disableAliasCounting(); + factory.disableStringSupport(); + + functionPassManager.addPass(TransformationRulePass(std::move(rule_set))); + + // https://llvm.org/docs/NewPassManager.html + // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); + // InlinerPass() + + return pass_builder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); +} + +llvm::ModulePassManager BaseProfile::createValidationModulePass( + llvm::PassBuilder &, llvm::PassBuilder::OptimizationLevel &) +{ + throw std::runtime_error("Validator not implmented yet"); +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Profiles/BaseProfile.hpp b/src/Passes/Source/Profiles/BaseProfile.hpp new file mode 100644 index 0000000000..59014beb88 --- /dev/null +++ b/src/Passes/Source/Profiles/BaseProfile.hpp @@ -0,0 +1,17 @@ +#pragma once +#include "Llvm/Llvm.hpp" +#include "Profiles/IProfile.hpp" + +namespace microsoft { +namespace quantum { + +class BaseProfile : public IProfile +{ +public: + llvm::ModulePassManager createGenerationModulePass( + PassBuilder &pass_builder, OptimizationLevel &optimisation_level) override; + llvm::ModulePassManager createValidationModulePass( + PassBuilder &pass_builder, OptimizationLevel &optimisation_level) override; +}; +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Profiles/IProfile.hpp 
b/src/Passes/Source/Profiles/IProfile.hpp index c8e973a932..29a48b09cd 100644 --- a/src/Passes/Source/Profiles/IProfile.hpp +++ b/src/Passes/Source/Profiles/IProfile.hpp @@ -8,11 +8,15 @@ namespace quantum { class IProfile { public: + using PassBuilder = llvm::PassBuilder; + using OptimizationLevel = PassBuilder::OptimizationLevel; + IProfile() = default; virtual ~IProfile(); virtual llvm::ModulePassManager createGenerationModulePass( - llvm::PassBuilder & pass_builder, - llvm::PassBuilder::OptimizationLevel &optimisation_level) = 0; + PassBuilder &pass_builder, OptimizationLevel &optimisation_level) = 0; + virtual llvm::ModulePassManager createValidationModulePass( + PassBuilder &pass_builder, OptimizationLevel &optimisation_level) = 0; }; } // namespace quantum From b02fed5d70c8a488b911b21a6bff21259425353a Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 12 Aug 2021 11:28:04 +0200 Subject: [PATCH 079/106] Updating CLI --- src/Passes/README.md | 14 ++++++++++++++ src/Passes/Source/Profiles/BaseProfile.hpp | 1 + 2 files changed, 15 insertions(+) diff --git a/src/Passes/README.md b/src/Passes/README.md index b014a9edbf..4f6ae93a83 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -2,6 +2,20 @@ # Getting started +## Quick start + +Once the project is built (see next sections), you can generate a new QIR as follows: + +```sh +./Source/Apps/qat --generate --profile baseProfile ../examples/QubitAllocationAnalysis/analysis-example.ll +``` + +Likewise, you can validate that a QIR follows a specification by running: + +```sh +./Source/Apps/qat --validate --profile baseProfile ../examples/QubitAllocationAnalysis/analysis-example.ll +``` + ## Dependencies This library is written in C++ and depends on: diff --git a/src/Passes/Source/Profiles/BaseProfile.hpp b/src/Passes/Source/Profiles/BaseProfile.hpp index 59014beb88..55f9bffd0c 100644 --- a/src/Passes/Source/Profiles/BaseProfile.hpp +++ b/src/Passes/Source/Profiles/BaseProfile.hpp @@ -13,5 +13,6 
@@ class BaseProfile : public IProfile llvm::ModulePassManager createValidationModulePass( PassBuilder &pass_builder, OptimizationLevel &optimisation_level) override; }; + } // namespace quantum } // namespace microsoft From f72c90c920ad1d35e0fc5ef43a5c4a52a8b61f4b Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 12 Aug 2021 11:43:32 +0200 Subject: [PATCH 080/106] Fixing profile bug --- .../ResourceRemapper/LibResourceRemapper.cpp | 37 +++++++++++++++++++ .../ResourceRemapper/ResourceRemapper.cpp | 29 +++++++++++++++ .../ResourceRemapper/ResourceRemapper.hpp | 35 ++++++++++++++++++ src/Passes/Source/Profiles/BaseProfile.cpp | 5 ++- 4 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp create mode 100644 src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp create mode 100644 src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp diff --git a/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp b/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp new file mode 100644 index 0000000000..29a7a8e94f --- /dev/null +++ b/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm/Llvm.hpp" +#include "ResourceRemapper/ResourceRemapper.hpp" + +#include +#include + +namespace { +llvm::PassPluginLibraryInfo getResourceRemapperPluginInfo() +{ + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "ResourceRemapper", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "resource-remapper") + { + fpm.addPass(ResourceRemapperPass()); + return true; + } + + return false; + }); + }}; +} +} // namespace + +// Interface for loading the plugin +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return getResourceRemapperPluginInfo(); +} diff --git a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp new file mode 100644 index 0000000000..5bc5b44207 --- /dev/null +++ b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "ResourceRemapper/ResourceRemapper.hpp" + +#include "Llvm/Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +llvm::PreservedAnalyses ResourceRemapperPass::run(llvm::Function &function, + llvm::FunctionAnalysisManager & /*fam*/) +{ + // Pass body + + llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; + + return llvm::PreservedAnalyses::all(); +} + +bool ResourceRemapperPass::isRequired() +{ + return true; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp new file mode 100644 index 0000000000..52b8076522 --- /dev/null +++ b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp @@ -0,0 +1,35 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" + +namespace microsoft { +namespace quantum { + +class ResourceRemapperPass : public llvm::PassInfoMixin +{ +public: + /// Constructors and destructors + /// @{ + ResourceRemapperPass() = default; + ResourceRemapperPass(ResourceRemapperPass const &) = default; + ResourceRemapperPass(ResourceRemapperPass &&) = default; + ~ResourceRemapperPass() = default; + /// @} + + /// Operators + /// @{ + ResourceRemapperPass &operator=(ResourceRemapperPass const &) = default; + ResourceRemapperPass &operator=(ResourceRemapperPass &&) = default; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index 24292cd94b..21a2a18ecf 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -35,7 +35,10 @@ llvm::ModulePassManager 
BaseProfile::createGenerationModulePass( // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); // InlinerPass() - return pass_builder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); + auto ret = pass_builder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); + ret.addPass(createModuleToFunctionPassAdaptor(std::move(functionPassManager))); + + return ret; } llvm::ModulePassManager BaseProfile::createValidationModulePass( From 139f3e0ed88002b37cce12665ec390bd98696181 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 12 Aug 2021 14:43:39 +0200 Subject: [PATCH 081/106] Fixing various bugs --- src/Passes/Source/Apps/CMakeLists.txt | 2 +- src/Passes/Source/Apps/Qat/Qat.cpp | 13 ++++++++-- src/Passes/Source/Llvm/Llvm.hpp | 3 +++ src/Passes/Source/Profiles/BaseProfile.cpp | 28 ++++++++++++++++------ src/Passes/Source/Profiles/BaseProfile.hpp | 11 +++++---- src/Passes/Source/Profiles/IProfile.hpp | 16 ++++++++----- 6 files changed, 53 insertions(+), 20 deletions(-) diff --git a/src/Passes/Source/Apps/CMakeLists.txt b/src/Passes/Source/Apps/CMakeLists.txt index 500c06c1d4..3162c7e35f 100644 --- a/src/Passes/Source/Apps/CMakeLists.txt +++ b/src/Passes/Source/Apps/CMakeLists.txt @@ -1,4 +1,4 @@ add_executable(qat Qat/Qat.cpp) target_link_libraries(qat ${llvm_libs}) -target_link_libraries(qat TransformationRule Rules AllocationManager Commandline Profiles) \ No newline at end of file +target_link_libraries(qat ExpandStaticAllocation QubitAllocationAnalysis TransformationRule Rules AllocationManager Commandline Profiles) \ No newline at end of file diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index a1e4f762a2..a0320a9f21 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -45,6 +45,8 @@ int main(int argc, char **argv) exit(-1); } + // settings.print(); + // Generating IR bool debug = settings.get("debug") == "true"; bool generate 
= settings.get("generate") == "true"; @@ -52,6 +54,9 @@ int main(int argc, char **argv) auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; BaseProfile profile; + // Worth looking at: + // https://opensource.apple.com/source/lldb/lldb-76/llvm/tools/opt/opt.cpp + if (generate) { // Creating pass builder @@ -69,7 +74,10 @@ int main(int argc, char **argv) pass_builder.crossRegisterProxies(loopAnalysisManager, functionAnalysisManager, cGSCCAnalysisManager, moduleAnalysisManager); - auto modulePassManager = profile.createGenerationModulePass(pass_builder, optimisation_level); + profile.addFunctionAnalyses(functionAnalysisManager); + auto modulePassManager = + profile.createGenerationModulePass(pass_builder, optimisation_level, debug); + modulePassManager.run(*module, moduleAnalysisManager); llvm::errs() << *module << "\n"; @@ -92,7 +100,8 @@ int main(int argc, char **argv) pass_builder.crossRegisterProxies(loopAnalysisManager, functionAnalysisManager, cGSCCAnalysisManager, moduleAnalysisManager); - auto modulePassManager = profile.createValidationModulePass(pass_builder, optimisation_level); + auto modulePassManager = + profile.createValidationModulePass(pass_builder, optimisation_level, debug); modulePassManager.run(*module, moduleAnalysisManager); } diff --git a/src/Passes/Source/Llvm/Llvm.hpp b/src/Passes/Source/Llvm/Llvm.hpp index 4b5a2ab4b5..dcb7cb234d 100644 --- a/src/Passes/Source/Llvm/Llvm.hpp +++ b/src/Passes/Source/Llvm/Llvm.hpp @@ -51,6 +51,9 @@ #include "llvm/Transforms/IPO/Inliner.h" #include "llvm/Transforms/Scalar/LoopUnrollPass.h" +// Testing +#include "llvm/LinkAllPasses.h" + #if defined(__clang__) #pragma clang diagnostic pop #endif diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index 21a2a18ecf..b8ebf0f368 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -1,6 +1,8 @@ #include "Profiles/BaseProfile.hpp" #include 
"Llvm/Llvm.hpp" +#include "Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp" +#include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include "Passes/TransformationRule/TransformationRule.hpp" #include "Rules/Factory.hpp" @@ -8,10 +10,16 @@ namespace microsoft { namespace quantum { llvm::ModulePassManager BaseProfile::createGenerationModulePass( - llvm::PassBuilder &pass_builder, llvm::PassBuilder::OptimizationLevel &optimisation_level) + llvm::PassBuilder &pass_builder, llvm::PassBuilder::OptimizationLevel &optimisation_level, + bool debug) { - auto functionPassManager = pass_builder.buildFunctionSimplificationPipeline( - optimisation_level, llvm::PassBuilder::ThinLTOPhase::None, true); + auto ret = pass_builder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); + // buildPerModuleDefaultPipeline buildModuleOptimizationPipeline + auto function_pass_manager = pass_builder.buildFunctionSimplificationPipeline( + optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); + + // TODO: Maybe this should be done at a module level + function_pass_manager.addPass(ExpandStaticAllocationPass()); RuleSet rule_set; @@ -29,23 +37,29 @@ llvm::ModulePassManager BaseProfile::createGenerationModulePass( factory.disableAliasCounting(); factory.disableStringSupport(); - functionPassManager.addPass(TransformationRulePass(std::move(rule_set))); + function_pass_manager.addPass(TransformationRulePass(std::move(rule_set))); // https://llvm.org/docs/NewPassManager.html // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); // InlinerPass() - auto ret = pass_builder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); - ret.addPass(createModuleToFunctionPassAdaptor(std::move(functionPassManager))); + ret.addPass(createModuleToFunctionPassAdaptor(std::move(function_pass_manager))); + + ret.addPass(llvm::AlwaysInlinerPass()); return ret; } llvm::ModulePassManager BaseProfile::createValidationModulePass( - 
llvm::PassBuilder &, llvm::PassBuilder::OptimizationLevel &) + llvm::PassBuilder &, llvm::PassBuilder::OptimizationLevel &, bool) { throw std::runtime_error("Validator not implmented yet"); } +void BaseProfile::addFunctionAnalyses(FunctionAnalysisManager &fam) +{ + fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); +} + } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/Profiles/BaseProfile.hpp b/src/Passes/Source/Profiles/BaseProfile.hpp index 55f9bffd0c..55a22db085 100644 --- a/src/Passes/Source/Profiles/BaseProfile.hpp +++ b/src/Passes/Source/Profiles/BaseProfile.hpp @@ -8,10 +8,13 @@ namespace quantum { class BaseProfile : public IProfile { public: - llvm::ModulePassManager createGenerationModulePass( - PassBuilder &pass_builder, OptimizationLevel &optimisation_level) override; - llvm::ModulePassManager createValidationModulePass( - PassBuilder &pass_builder, OptimizationLevel &optimisation_level) override; + llvm::ModulePassManager createGenerationModulePass(PassBuilder & pass_builder, + OptimizationLevel &optimisation_level, + bool debug) override; + llvm::ModulePassManager createValidationModulePass(PassBuilder & pass_builder, + OptimizationLevel &optimisation_level, + bool debug) override; + void addFunctionAnalyses(FunctionAnalysisManager &fam) override; }; } // namespace quantum diff --git a/src/Passes/Source/Profiles/IProfile.hpp b/src/Passes/Source/Profiles/IProfile.hpp index 29a48b09cd..d832bf0ac7 100644 --- a/src/Passes/Source/Profiles/IProfile.hpp +++ b/src/Passes/Source/Profiles/IProfile.hpp @@ -8,15 +8,19 @@ namespace quantum { class IProfile { public: - using PassBuilder = llvm::PassBuilder; - using OptimizationLevel = PassBuilder::OptimizationLevel; + using PassBuilder = llvm::PassBuilder; + using OptimizationLevel = PassBuilder::OptimizationLevel; + using FunctionAnalysisManager = llvm::FunctionAnalysisManager; IProfile() = default; virtual ~IProfile(); - virtual llvm::ModulePassManager 
createGenerationModulePass( - PassBuilder &pass_builder, OptimizationLevel &optimisation_level) = 0; - virtual llvm::ModulePassManager createValidationModulePass( - PassBuilder &pass_builder, OptimizationLevel &optimisation_level) = 0; + virtual llvm::ModulePassManager createGenerationModulePass(PassBuilder & pass_builder, + OptimizationLevel &optimisation_level, + bool debug) = 0; + virtual llvm::ModulePassManager createValidationModulePass(PassBuilder & pass_builder, + OptimizationLevel &optimisation_level, + bool debug) = 0; + virtual void addFunctionAnalyses(FunctionAnalysisManager &fam) = 0; }; } // namespace quantum From c4da2e960444ba2f59a86b9ce981fd835577d30e Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 12 Aug 2021 15:54:15 +0200 Subject: [PATCH 082/106] Adding documentation --- src/Passes/README.md | 169 + src/Passes/Source/Apps/Qat/Qat.cpp | 8 + src/Passes/Source/Llvm/Llvm.hpp | 3 +- src/Passes/Source/Profiles/BaseProfile.cpp | 6 +- .../ConstSizeArray_BASE_74058.csproj | 0 .../ConstSizeArray/qir/ConstSizeArray.ll | 3445 ----------------- 6 files changed, 184 insertions(+), 3447 deletions(-) delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_BASE_74058.csproj delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll diff --git a/src/Passes/README.md b/src/Passes/README.md index 4f6ae93a83..fac6675081 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -16,6 +16,175 @@ Likewise, you can validate that a QIR follows a specification by running: ./Source/Apps/qat --validate --profile baseProfile ../examples/QubitAllocationAnalysis/analysis-example.ll ``` +## Example + +In this example, we start with a QIR generated by the Q# frontend. 
Rather than giving the full 3445 lines of QIR, we instead give the frontend code: + +``` +namespace TeleportChain { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Measurement; + open Microsoft.Quantum.Preparation; + + operation PrepareEntangledPair(left : Qubit, right : Qubit) : Unit is Adj + Ctl { + H(left); + CNOT(left, right); + } + + operation ApplyCorrection(src : Qubit, intermediary : Qubit, dest : Qubit) : Unit { + if (MResetZ(src) == One) { Z(dest); } + if (MResetZ(intermediary) == One) { X(dest); } + } + + operation TeleportQubitUsingPresharedEntanglement(src : Qubit, intermediary : Qubit, dest : Qubit) : Unit { + Adjoint PrepareEntangledPair(src, intermediary); + ApplyCorrection(src, intermediary, dest); + } + + operation TeleportQubit(src : Qubit, dest : Qubit) : Unit { + use intermediary = Qubit(); + PrepareEntangledPair(intermediary, dest); + TeleportQubitUsingPresharedEntanglement(src, intermediary, dest); + } + + operation DemonstrateEntanglementSwapping() : (Result, Result) { + use (reference, src, intermediary, dest) = (Qubit(), Qubit(), Qubit(), Qubit()); + PrepareEntangledPair(reference, src); + TeleportQubit(src, dest); + return (MResetZ(reference), MResetZ(dest)); + } + + @EntryPoint() + operation DemonstrateTeleportationUsingPresharedEntanglement() : Unit { + let nPairs = 2; + use (leftMessage, rightMessage, leftPreshared, rightPreshared) = (Qubit(), Qubit(), Qubit[nPairs], Qubit[nPairs]); + PrepareEntangledPair(leftMessage, rightMessage); + for i in 0..nPairs-1 { + PrepareEntangledPair(leftPreshared[i], rightPreshared[i]); + } + + TeleportQubitUsingPresharedEntanglement(rightMessage, leftPreshared[0], rightPreshared[0]); + for i in 1..nPairs-1 { + TeleportQubitUsingPresharedEntanglement(rightPreshared[i-1], leftPreshared[i], rightPreshared[i]); + } + + let _ = MResetZ(leftMessage); + let _ = MResetZ(rightPreshared[nPairs-1]); + } +} +``` + +Once compiled and the 
initial QIR is generated and saved in the file `analysis-example.ll`, we execute the command + +``` +./Source/Apps/qat --generate --profile baseProfile ./analysis-example.ll +``` + +The QAT tool will now attempt to map the QIR in `analysis-example.ll` into a QIR which is compatible with the base format. Removing type and function declarations, the corresponding code reads: + +``` +; ModuleID = './analysis-example.ll' +source_filename = "./analysis-example.ll" + +define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +entry: + %leftMessage = inttoptr i64 0 to %Qubit* + %rightMessage = inttoptr i64 1 to %Qubit* + call void @__quantum__qis__h(%Qubit* %leftMessage) + call void @__quantum__qis__cnot(%Qubit* %leftMessage, %Qubit* %rightMessage) + %0 = inttoptr i64 0 to %Qubit* + %1 = inttoptr i64 2 to %Qubit* + call void @__quantum__qis__h(%Qubit* %0) + call void @__quantum__qis__cnot(%Qubit* %0, %Qubit* %1) + %2 = inttoptr i64 1 to %Qubit* + %3 = inttoptr i64 3 to %Qubit* + call void @__quantum__qis__h(%Qubit* %2) + call void @__quantum__qis__cnot(%Qubit* %2, %Qubit* %3) + %4 = inttoptr i64 0 to %Qubit* + %5 = inttoptr i64 2 to %Qubit* + call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) + %6 = inttoptr i64 2 to %Qubit* + %7 = inttoptr i64 1 to %Qubit* + %8 = inttoptr i64 3 to %Qubit* + call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2(%Qubit* %6, %Qubit* %7, %Qubit* %8) + %result.i = inttoptr i64 0 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %leftMessage, %Result* %result.i) + call void @__quantum__qis__reset__body(%Qubit* %leftMessage) + %9 = inttoptr i64 3 to %Qubit* + %result.i1 = inttoptr i64 1 to %Result* + call void @__quantum__qis__mz__body(%Qubit* %9, %Result* %result.i1) + call void @__quantum__qis__reset__body(%Qubit* %9) + ret void +} + +define internal fastcc void 
@TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { +entry: + call void @__quantum__qis__cnot(%Qubit* %src, %Qubit* %intermediary) + call void @__quantum__qis__h(%Qubit* %src) + %result.i.i = inttoptr i64 4 to %Result* + %0 = call i1 @__quantum__qir__read_result(%Result* %result.i.i) + call void @__quantum__qis__mz__body(%Qubit* %src, %Result* %result.i.i) + call void @__quantum__qis__reset__body(%Qubit* %src) + br i1 %0, label %then0__1.i, label %continue__1.i + +then0__1.i: ; preds = %entry + call void @__quantum__qis__z(%Qubit* %dest) + br label %continue__1.i + +continue__1.i: ; preds = %then0__1.i, %entry + %result.i1.i = inttoptr i64 5 to %Result* + %1 = call i1 @__quantum__qir__read_result(%Result* %result.i1.i) + call void @__quantum__qis__mz__body(%Qubit* %intermediary, %Result* %result.i1.i) + call void @__quantum__qis__reset__body(%Qubit* %intermediary) + br i1 %1, label %then0__2.i, label %TeleportChain__ApplyCorrection__body.exit + +then0__2.i: ; preds = %continue__1.i + call void @__quantum__qis__x(%Qubit* %dest) + br label %TeleportChain__ApplyCorrection__body.exit + +TeleportChain__ApplyCorrection__body.exit: ; preds = %then0__2.i, %continue__1.i + ret void +} + +define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { +entry: + call void @__quantum__qis__cnot(%Qubit* %src, %Qubit* %intermediary) + call void @__quantum__qis__h(%Qubit* %src) + %result.i.i = inttoptr i64 6 to %Result* + %0 = call i1 @__quantum__qir__read_result(%Result* %result.i.i) + call void @__quantum__qis__mz__body(%Qubit* %src, %Result* %result.i.i) + call void @__quantum__qis__reset__body(%Qubit* %src) + br i1 %0, label %then0__1.i, label %continue__1.i + +then0__1.i: ; preds = %entry + call void @__quantum__qis__z(%Qubit* %dest) + br label %continue__1.i + +continue__1.i: ; preds = %then0__1.i, 
%entry + %result.i1.i = inttoptr i64 7 to %Result* + %1 = call i1 @__quantum__qir__read_result(%Result* %result.i1.i) + call void @__quantum__qis__mz__body(%Qubit* %intermediary, %Result* %result.i1.i) + call void @__quantum__qis__reset__body(%Qubit* %intermediary) + br i1 %1, label %then0__2.i, label %TeleportChain__ApplyCorrection__body.exit + +then0__2.i:                                       ; preds = %continue__1.i + call void @__quantum__qis__x(%Qubit* %dest) + br label %TeleportChain__ApplyCorrection__body.exit + +TeleportChain__ApplyCorrection__body.exit:        ; preds = %then0__2.i, %continue__1.i + ret void +} + +``` + +We note the absence of loops, and that quantum registers are "allocated" at compile time meaning that each qubit instance is assigned a unique ID. As some code may be dead and optimised away, the qubit allocation is not guaranteed to be sequential at this point in time. Future work will include writing a qubit ID remapper which will allow qubit IDs to be reassigned sequentially. + +We also note that the function `TeleportChain__TeleportQubitUsingPresharedEntanglement__body` was cloned twice. This is due to the allocation of qubits and the function being called twice. At present, the analyser does not take qubit release into account and just assumes that it will never be released due to the complicated nature of dealing with nested functions at compile time. + +Current TODOs include getting LLVM to remove dead code, do better constant folding and function inlining. Once this is performed correctly, the next steps are the remapper and finally a better analysis on what call paths potentially create problems in terms of qubit allocation. 
+ ## Dependencies This library is written in C++ and depends on: diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index a0320a9f21..3ce588330c 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -80,6 +80,14 @@ int main(int argc, char **argv) modulePassManager.run(*module, moduleAnalysisManager); + // + + llvm::legacy::PassManager legacy_pass_manager; + legacy_pass_manager.add(llvm::createCalledValuePropagationPass()); + legacy_pass_manager.add(llvm::createCalledValuePropagationPass()); + legacy_pass_manager.add(llvm::createConstantMergePass()); + legacy_pass_manager.run(*module); + llvm::errs() << *module << "\n"; } diff --git a/src/Passes/Source/Llvm/Llvm.hpp b/src/Passes/Source/Llvm/Llvm.hpp index dcb7cb234d..066d1c4644 100644 --- a/src/Passes/Source/Llvm/Llvm.hpp +++ b/src/Passes/Source/Llvm/Llvm.hpp @@ -51,7 +51,8 @@ #include "llvm/Transforms/IPO/Inliner.h" #include "llvm/Transforms/Scalar/LoopUnrollPass.h" -// Testing +// Profiles +#include "llvm/IR/LegacyPassManager.h" #include "llvm/LinkAllPasses.h" #if defined(__clang__) diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index b8ebf0f368..9e99a2eed2 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -38,6 +38,11 @@ llvm::ModulePassManager BaseProfile::createGenerationModulePass( factory.disableStringSupport(); function_pass_manager.addPass(TransformationRulePass(std::move(rule_set))); + // function_pass_manager.addPass(llvm::createCalledValuePropagationPass()); + // function_pass_manager.addPass(createSIFoldOperandsPass()); + + // Legacy passes: + // https://llvm.org/doxygen/group__LLVMCTransformsIPO.html#ga2ebfe3e0c3cca3b457708b4784ba93ff // https://llvm.org/docs/NewPassManager.html // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); @@ -46,7 +51,6 @@ llvm::ModulePassManager 
BaseProfile::createGenerationModulePass( ret.addPass(createModuleToFunctionPassAdaptor(std::move(function_pass_manager))); ret.addPass(llvm::AlwaysInlinerPass()); - return ret; } diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_BASE_74058.csproj b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray_BASE_74058.csproj deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll deleted file mode 100644 index 7dbd00d86e..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/qir/ConstSizeArray.ll +++ /dev/null @@ -1,3445 +0,0 @@ - -%Range = type { i64, i64, i64 } -%Tuple = type opaque -%Qubit = type opaque -%Result = type opaque -%Array = type opaque -%Callable = type opaque -%String = type opaque - -@PauliI = internal constant i2 0 -@PauliX = internal constant i2 1 -@PauliY = internal constant i2 -1 -@PauliZ = internal constant i2 -2 -@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -@0 = internal constant [18 x i8] c"Unsupported input\00" -@1 = internal constant [18 x i8] c"Unsupported input\00" -@Microsoft__Quantum__Intrinsic__CNOT = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper] -@Microsoft__Quantum__Intrinsic__H = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void 
(%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] -@Microsoft__Quantum__Intrinsic__Rx = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__ctladj__wrapper] -@Microsoft__Quantum__Intrinsic__Ry = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__ctladj__wrapper] -@Microsoft__Quantum__Intrinsic__Rz = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__ctladj__wrapper] -@Microsoft__Quantum__Intrinsic__S = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] -@Microsoft__Quantum__Intrinsic__T = internal constant [4 x void (%Tuple*, %Tuple*, 
%Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__ctladj__wrapper] -@Microsoft__Quantum__Intrinsic__X = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] -@Microsoft__Quantum__Intrinsic__Z = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper] -@2 = internal constant [3 x i8] c"()\00" - -define internal void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) { -entry: - %0 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) - %1 = call %Result* @__quantum__rt__result_get_one() - %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) - call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) - br i1 %2, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - %3 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) - %4 = call %Result* 
@__quantum__rt__result_get_one() - %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) - call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) - br i1 %5, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) - br label %continue__2 - -continue__2: ; preds = %then0__2, %continue__1 - ret void -} - -define internal %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -declare %Result* @__quantum__rt__result_get_one() - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) - -declare void @__quantum__rt__result_update_reference_count(%Result*, i32) - -define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__z(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__x(%Qubit* %qubit) - ret void -} - -define internal void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() { -entry: - %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) - %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) - call void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] - %0 = icmp sle i64 %i, 1 - br i1 %0, label %body__1, 
label %exit__1 - -body__1: ; preds = %header__1 - %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 %i) - %2 = bitcast i8* %1 to %Qubit** - %3 = load %Qubit*, %Qubit** %2, align 8 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %i) - %5 = bitcast i8* %4 to %Qubit** - %6 = load %Qubit*, %Qubit** %5, align 8 - call void @TeleportChain__PrepareEntangledPair__body(%Qubit* %3, %Qubit* %6) - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %7 = add i64 %i, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9, align 8 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12, align 8 - call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %10, %Qubit* %13) - br label %header__2 - -header__2: ; preds = %exiting__2, %exit__1 - %i__1 = phi i64 [ 1, %exit__1 ], [ %25, %exiting__2 ] - %14 = icmp sle i64 %i__1, 1 - br i1 %14, label %body__2, label %exit__2 - -body__2: ; preds = %header__2 - %15 = sub i64 %i__1, 1 - %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %15) - %17 = bitcast i8* %16 to %Qubit** - %18 = load %Qubit*, %Qubit** %17, align 8 - %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 %i__1) - %20 = bitcast i8* %19 to %Qubit** - %21 = load %Qubit*, %Qubit** %20, align 8 - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %i__1) - %23 = bitcast i8* %22 to %Qubit** - %24 = load %Qubit*, %Qubit** %23, align 8 - call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %18, %Qubit* %21, %Qubit* %24) - br label %exiting__2 - -exiting__2: ; preds = %body__2 - %25 
= add i64 %i__1, 1 - br label %header__2 - -exit__2: ; preds = %header__2 - %26 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %leftMessage) - %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %28 = bitcast i8* %27 to %Qubit** - %29 = load %Qubit*, %Qubit** %28, align 8 - %30 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %29) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %26, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %30, i32 -1) - call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) - call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) - call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) - call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret void -} - -declare %Qubit* @__quantum__rt__qubit_allocate() - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) - -declare void @__quantum__rt__qubit_release(%Qubit*) - -declare void @__quantum__rt__qubit_release_array(%Array*) - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) - -define internal void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) { -entry: - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) - ret void -} - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) - -define internal void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) { -entry: - call void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) - call void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* 
%intermediary, %Qubit* %dest) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__h(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { -entry: - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) { -entry: - call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { -entry: - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { -entry: - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - ret void -} - -define internal void @TeleportChain__PrepareEntangledPair__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %left = load %Qubit*, %Qubit** %1, align 8 - %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %right = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %left) - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* - %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 
1 - store %Qubit* %left, %Qubit** %5, align 8 - store %Qubit* %right, %Qubit** %6, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__h(%Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %3 = icmp eq i64 %2, 1 - br i1 %3, label %then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %5 = bitcast i8* %4 to %Qubit** - %6 = load %Qubit*, %Qubit** %5, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %6, %Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test1__1 - %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %7) - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw 
(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* - %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Array* %ctls, %Array** %10, align 8 - store %Qubit* %qubit, %Qubit** %11, align 8 - call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %7, { %Array*, %Qubit* }* %9) - call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %control = load %Qubit*, %Qubit** %1, align 8 - %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %target = load %Qubit*, %Qubit** %2, align 8 - %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %4 = icmp eq i64 %3, 0 - br i1 %4, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) - br label %continue__1 - -test1__1: ; preds = %entry - %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %6 = icmp eq i64 %5, 1 - br i1 %6, label 
%then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %9, %Qubit* %control, %Qubit* %target) - br label %continue__1 - -else__1: ; preds = %test1__1 - %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__CNOT, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %10) - %11 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %12 = bitcast %Tuple* %11 to { %Array*, { %Qubit*, %Qubit* }* }* - %13 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %12, i32 0, i32 0 - %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %12, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - %15 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %16 = bitcast %Tuple* %15 to { %Qubit*, %Qubit* }* - %17 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %16, i32 0, i32 0 - %18 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %16, i32 0, i32 1 - store %Qubit* %control, %Qubit** %17, align 8 - store %Qubit* %target, %Qubit** %18, align 8 - store %Array* %ctls, %Array** %13, align 8 - store { %Qubit*, %Qubit* }* %16, { %Qubit*, %Qubit* }** %14, align 8 - call void @Microsoft__Quantum__Intrinsic___27e64f0afee94ef4bf9523108ce47367___QsRef23__ApplyWithLessControlsA____body(%Callable* %10, { %Array*, { %Qubit*, %Qubit* }* }* %12) - call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) - 
call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -declare %Tuple* @__quantum__rt__tuple_create(i64) - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) - -define internal void @TeleportChain__PrepareEntangledPair__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %left = load %Qubit*, %Qubit** %1, align 8 - %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %right = load %Qubit*, %Qubit** %2, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* - %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 - store %Qubit* %left, %Qubit** %5, align 8 - store %Qubit* %right, %Qubit** %6, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) - call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %left) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void 
@Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %control = load %Qubit*, %Qubit** %1, align 8 - %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %target = load %Qubit*, %Qubit** %2, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* - %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 - store %Qubit* %control, %Qubit** %5, align 8 - store %Qubit* %target, %Qubit** %6, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - ret void -} - -define internal double @Microsoft__Quantum__Math__PI__body() { -entry: - ret double 0x400921FB54442D18 -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledX____body(%Qubit* %control, %Qubit* %target) { -entry: - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) - ret void -} - -declare void 
@__quantum__qis__cnot(%Qubit*, %Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledX____adj(%Qubit* %control, %Qubit* %target) { -entry: - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledZ____body(%Qubit* %control, %Qubit* %target) { -entry: - call void @__quantum__qis__cz(%Qubit* %control, %Qubit* %target) - ret void -} - -declare void @__quantum__qis__cz(%Qubit*, %Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledZ____adj(%Qubit* %control, %Qubit* %target) { -entry: - call void @__quantum__qis__cz(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____body(double %theta) { -entry: - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____adj(double %theta) { -entry: - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctl(%Array* %controls, double %theta) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) - %1 = icmp sgt i64 %0, 0 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 0) - %3 = bitcast i8* %2 to %Qubit** - %qubit = load %Qubit*, %Qubit** %3, align 8 - %4 = sub i64 %0, 1 - %5 = load %Range, %Range* @EmptyRange, align 4 - %6 = insertvalue %Range %5, i64 1, 0 - %7 = insertvalue %Range %6, i64 1, 1 - %8 = insertvalue %Range %7, i64 %4, 2 - %rest = call %Array* @__quantum__rt__array_slice_1d(%Array* %controls, %Range %8, i1 true) - call void @__quantum__rt__array_update_alias_count(%Array* %rest, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* 
getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %10 = bitcast %Tuple* %9 to { double, %Qubit* }* - %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 - store double %theta, double* %11, align 8 - store %Qubit* %qubit, %Qubit** %12, align 8 - call void @Microsoft__Quantum__Intrinsic__R1__ctl(%Array* %rest, { double, %Qubit* }* %10) - call void @__quantum__rt__array_update_alias_count(%Array* %rest, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %rest, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) - ret void -} - -declare i64 @__quantum__rt__array_get_size_1d(%Array*) - -declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) - -define internal void @Microsoft__Quantum__Intrinsic__R1__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %theta = load double, double* %1, align 8 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %qubit = load %Qubit*, %Qubit** %2, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) - %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* - %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 - %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, 
%Qubit* }* %4, i32 0, i32 2 - %8 = load i2, i2* @PauliZ, align 1 - store i2 %8, i2* %5, align 1 - store double %theta, double* %6, align 8 - store %Qubit* %qubit, %Qubit** %7, align 8 - call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) - %10 = bitcast %Tuple* %9 to { i2, double, %Qubit* }* - %11 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 1 - %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 2 - %14 = load i2, i2* @PauliI, align 1 - %15 = fneg double %theta - store i2 %14, i2* %11, align 1 - store double %15, double* %12, align 8 - store %Qubit* %qubit, %Qubit** %13, align 8 - call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %10) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) - ret void -} - -declare void @__quantum__rt__array_update_reference_count(%Array*, i32) - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctladj(%Array* %controls, double %theta) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) - %1 = icmp sgt i64 %0, 0 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 0) - %3 = bitcast i8* %2 to %Qubit** - %__qsVar0__qubit__ = load %Qubit*, %Qubit** 
%3, align 8 - %4 = sub i64 %0, 1 - %5 = load %Range, %Range* @EmptyRange, align 4 - %6 = insertvalue %Range %5, i64 1, 0 - %7 = insertvalue %Range %6, i64 1, 1 - %8 = insertvalue %Range %7, i64 %4, 2 - %__qsVar1__rest__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controls, %Range %8, i1 true) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__rest__, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %10 = bitcast %Tuple* %9 to { double, %Qubit* }* - %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 - store double %theta, double* %11, align 8 - store %Qubit* %__qsVar0__qubit__, %Qubit** %12, align 8 - call void @Microsoft__Quantum__Intrinsic__R1__ctladj(%Array* %__qsVar1__rest__, { double, %Qubit* }* %10) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__rest__, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__rest__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R1__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %theta = load double, double* %1, align 8 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %qubit = load %Qubit*, %Qubit** %2, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* 
getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) - %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* - %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 - %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 - %8 = load i2, i2* @PauliI, align 1 - %9 = fneg double %theta - store i2 %8, i2* %5, align 1 - store double %9, double* %6, align 8 - store %Qubit* %qubit, %Qubit** %7, align 8 - call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) - %11 = bitcast %Tuple* %10 to { i2, double, %Qubit* }* - %12 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 1 - %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 2 - %15 = load i2, i2* @PauliZ, align 1 - store i2 %15, i2* %12, align 1 - store double %theta, double* %13, align 8 - store %Qubit* %qubit, %Qubit** %14, align 8 - call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %11) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledH____body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__h(%Qubit* %qubit) - ret void -} - -declare void 
@__quantum__qis__h(%Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledH____adj(%Qubit* %qubit) { -entry: - call void @__quantum__qis__h(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledX____body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__x(%Qubit* %qubit) - ret void -} - -declare void @__quantum__qis__x(%Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledX____adj(%Qubit* %qubit) { -entry: - call void @__quantum__qis__x(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledZ____body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__z(%Qubit* %qubit) - ret void -} - -declare void @__quantum__qis__z(%Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledZ____adj(%Qubit* %qubit) { -entry: - call void @__quantum__qis__z(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %from, i2 %to) { -entry: - %0 = icmp eq i2 %from, %to - br i1 %0, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - br label %continue__1 - -test1__1: ; preds = %entry - %1 = load i2, i2* @PauliZ, align 1 - %2 = icmp eq i2 %from, %1 - br i1 %2, label %condTrue__1, label %condContinue__1 - -condTrue__1: ; preds = %test1__1 - %3 = load i2, i2* @PauliX, align 1 - %4 = icmp eq i2 %to, %3 - br label %condContinue__1 - -condContinue__1: ; preds = %condTrue__1, %test1__1 - %5 = phi i1 [ %4, %condTrue__1 ], [ %2, %test1__1 ] - %6 = xor i1 %5, true - br i1 %6, label %condTrue__2, label %condContinue__2 - -condTrue__2: ; preds = %condContinue__1 - %7 = load i2, i2* @PauliX, align 1 - %8 = icmp eq i2 %from, %7 - br i1 %8, label %condTrue__3, label %condContinue__3 - -condTrue__3: ; preds = %condTrue__2 - %9 = load i2, i2* @PauliZ, align 1 - %10 = icmp eq i2 %to, 
%9 - br label %condContinue__3 - -condContinue__3: ; preds = %condTrue__3, %condTrue__2 - %11 = phi i1 [ %10, %condTrue__3 ], [ %8, %condTrue__2 ] - br label %condContinue__2 - -condContinue__2: ; preds = %condContinue__3, %condContinue__1 - %12 = phi i1 [ %11, %condContinue__3 ], [ %5, %condContinue__1 ] - br i1 %12, label %then1__1, label %test2__1 - -then1__1: ; preds = %condContinue__2 - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - br label %continue__1 - -test2__1: ; preds = %condContinue__2 - %13 = load i2, i2* @PauliZ, align 1 - %14 = icmp eq i2 %from, %13 - br i1 %14, label %condTrue__4, label %condContinue__4 - -condTrue__4: ; preds = %test2__1 - %15 = load i2, i2* @PauliY, align 1 - %16 = icmp eq i2 %to, %15 - br label %condContinue__4 - -condContinue__4: ; preds = %condTrue__4, %test2__1 - %17 = phi i1 [ %16, %condTrue__4 ], [ %14, %test2__1 ] - br i1 %17, label %then2__1, label %test3__1 - -then2__1: ; preds = %condContinue__4 - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - br label %continue__1 - -test3__1: ; preds = %condContinue__4 - %18 = load i2, i2* @PauliY, align 1 - %19 = icmp eq i2 %from, %18 - br i1 %19, label %condTrue__5, label %condContinue__5 - -condTrue__5: ; preds = %test3__1 - %20 = load i2, i2* @PauliZ, align 1 - %21 = icmp eq i2 %to, %20 - br label %condContinue__5 - -condContinue__5: ; preds = %condTrue__5, %test3__1 - %22 = phi i1 [ %21, %condTrue__5 ], [ %19, %test3__1 ] - br i1 %22, label %then3__1, label %test4__1 - -then3__1: ; preds = %condContinue__5 - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - br label %continue__1 - -test4__1: ; preds = %condContinue__5 - %23 = load i2, i2* @PauliY, align 1 
- %24 = icmp eq i2 %from, %23 - br i1 %24, label %condTrue__6, label %condContinue__6 - -condTrue__6: ; preds = %test4__1 - %25 = load i2, i2* @PauliX, align 1 - %26 = icmp eq i2 %to, %25 - br label %condContinue__6 - -condContinue__6: ; preds = %condTrue__6, %test4__1 - %27 = phi i1 [ %26, %condTrue__6 ], [ %24, %test4__1 ] - br i1 %27, label %then4__1, label %test5__1 - -then4__1: ; preds = %condContinue__6 - call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) - br label %continue__1 - -test5__1: ; preds = %condContinue__6 - %28 = load i2, i2* @PauliX, align 1 - %29 = icmp eq i2 %from, %28 - br i1 %29, label %condTrue__7, label %condContinue__7 - -condTrue__7: ; preds = %test5__1 - %30 = load i2, i2* @PauliY, align 1 - %31 = icmp eq i2 %to, %30 - br label %condContinue__7 - -condContinue__7: ; preds = %condTrue__7, %test5__1 - %32 = phi i1 [ %31, %condTrue__7 ], [ %29, %test5__1 ] - br i1 %32, label %then5__1, label %else__1 - -then5__1: ; preds = %condContinue__7 - call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %condContinue__7 - %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @0, i32 0, i32 0)) - call void @__quantum__rt__fail(%String* %33) - unreachable - -continue__1: ; preds = %then5__1, %then4__1, %then3__1, %then2__1, %then1__1, %then0__1 - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__s(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { -entry: - call void @__quantum__qis__sadj(%Qubit* %qubit) - ret void -} - -declare %String* @__quantum__rt__string_create(i8*) - -declare void @__quantum__rt__fail(%String*) - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %from, i2 %to) { -entry: - %0 = icmp eq i2 %from, %to - br i1 %0, label 
%then0__1, label %test1__1 - -then0__1: ; preds = %entry - br label %continue__1 - -test1__1: ; preds = %entry - %1 = load i2, i2* @PauliZ, align 1 - %2 = icmp eq i2 %from, %1 - br i1 %2, label %condTrue__1, label %condContinue__1 - -condTrue__1: ; preds = %test1__1 - %3 = load i2, i2* @PauliX, align 1 - %4 = icmp eq i2 %to, %3 - br label %condContinue__1 - -condContinue__1: ; preds = %condTrue__1, %test1__1 - %5 = phi i1 [ %4, %condTrue__1 ], [ %2, %test1__1 ] - %6 = xor i1 %5, true - br i1 %6, label %condTrue__2, label %condContinue__2 - -condTrue__2: ; preds = %condContinue__1 - %7 = load i2, i2* @PauliX, align 1 - %8 = icmp eq i2 %from, %7 - br i1 %8, label %condTrue__3, label %condContinue__3 - -condTrue__3: ; preds = %condTrue__2 - %9 = load i2, i2* @PauliZ, align 1 - %10 = icmp eq i2 %to, %9 - br label %condContinue__3 - -condContinue__3: ; preds = %condTrue__3, %condTrue__2 - %11 = phi i1 [ %10, %condTrue__3 ], [ %8, %condTrue__2 ] - br label %condContinue__2 - -condContinue__2: ; preds = %condContinue__3, %condContinue__1 - %12 = phi i1 [ %11, %condContinue__3 ], [ %5, %condContinue__1 ] - br i1 %12, label %then1__1, label %test2__1 - -then1__1: ; preds = %condContinue__2 - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) - br label %continue__1 - -test2__1: ; preds = %condContinue__2 - %13 = load i2, i2* @PauliZ, align 1 - %14 = icmp eq i2 %from, %13 - br i1 %14, label %condTrue__4, label %condContinue__4 - -condTrue__4: ; preds = %test2__1 - %15 = load i2, i2* @PauliY, align 1 - %16 = icmp eq i2 %to, %15 - br label %condContinue__4 - -condContinue__4: ; preds = %condTrue__4, %test2__1 - %17 = phi i1 [ %16, %condTrue__4 ], [ %14, %test2__1 ] - br i1 %17, label %then2__1, label %test3__1 - -then2__1: ; preds = %condContinue__4 - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) - br label 
%continue__1 - -test3__1: ; preds = %condContinue__4 - %18 = load i2, i2* @PauliY, align 1 - %19 = icmp eq i2 %from, %18 - br i1 %19, label %condTrue__5, label %condContinue__5 - -condTrue__5: ; preds = %test3__1 - %20 = load i2, i2* @PauliZ, align 1 - %21 = icmp eq i2 %to, %20 - br label %condContinue__5 - -condContinue__5: ; preds = %condTrue__5, %test3__1 - %22 = phi i1 [ %21, %condTrue__5 ], [ %19, %test3__1 ] - br i1 %22, label %then3__1, label %test4__1 - -then3__1: ; preds = %condContinue__5 - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) - br label %continue__1 - -test4__1: ; preds = %condContinue__5 - %23 = load i2, i2* @PauliY, align 1 - %24 = icmp eq i2 %from, %23 - br i1 %24, label %condTrue__6, label %condContinue__6 - -condTrue__6: ; preds = %test4__1 - %25 = load i2, i2* @PauliX, align 1 - %26 = icmp eq i2 %to, %25 - br label %condContinue__6 - -condContinue__6: ; preds = %condTrue__6, %test4__1 - %27 = phi i1 [ %26, %condTrue__6 ], [ %24, %test4__1 ] - br i1 %27, label %then4__1, label %test5__1 - -then4__1: ; preds = %condContinue__6 - call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) - br label %continue__1 - -test5__1: ; preds = %condContinue__6 - %28 = load i2, i2* @PauliX, align 1 - %29 = icmp eq i2 %from, %28 - br i1 %29, label %condTrue__7, label %condContinue__7 - -condTrue__7: ; preds = %test5__1 - %30 = load i2, i2* @PauliY, align 1 - %31 = icmp eq i2 %to, %30 - br label %condContinue__7 - -condContinue__7: ; preds = %condTrue__7, %test5__1 - %32 = phi i1 [ %31, %condTrue__7 ], [ %29, %test5__1 ] - br i1 %32, label %then5__1, label %else__1 - -then5__1: ; preds = %condContinue__7 - call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %condContinue__7 - %33 = call %String* @__quantum__rt__string_create(i8* 
getelementptr inbounds ([18 x i8], [18 x i8]* @1, i32 0, i32 0)) - call void @__quantum__rt__fail(%String* %33) - unreachable - -continue__1: ; preds = %then5__1, %then4__1, %then3__1, %then2__1, %then1__1, %then0__1 - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { -entry: - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control1) - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control1, %Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control1) - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %target) - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control1) - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control1, %Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__t(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) { -entry: - call void @__quantum__qis__tadj(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { -entry: - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) - call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control2) - call void 
@Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control1, %Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control1) - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %target) - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control1) - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control1, %Qubit* %control2) - call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control1) - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { -entry: - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) - %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) - %2 = bitcast i8* %1 to %Qubit** - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 1) - %4 = bitcast i8* %3 to %Qubit** - store %Qubit* %control1, %Qubit** %2, align 8 - store %Qubit* %control2, %Qubit** %4, align 8 - call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %0, %Qubit* %target) - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__z(%Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %3 = icmp eq i64 %2, 
1 - br i1 %3, label %then1__1, label %test2__1 - -then1__1: ; preds = %test1__1 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %5 = bitcast i8* %4 to %Qubit** - %control = load %Qubit*, %Qubit** %5, align 8 - call void @__quantum__qis__cz(%Qubit* %control, %Qubit* %qubit) - br label %continue__1 - -test2__1: ; preds = %test1__1 - %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %7 = icmp eq i64 %6, 2 - br i1 %7, label %then2__1, label %else__1 - -then2__1: ; preds = %test2__1 - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9, align 8 - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %10) - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12, align 8 - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %13) - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %15 = bitcast i8* %14 to %Qubit** - %16 = load %Qubit*, %Qubit** %15, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %qubit, %Qubit* %16) - %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %18 = bitcast i8* %17 to %Qubit** - %19 = load %Qubit*, %Qubit** %18, align 8 - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %19) - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) - %21 = bitcast i8* %20 to %Qubit** - %22 = load %Qubit*, %Qubit** %21, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %22, %Qubit* %qubit) - %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) - %24 = bitcast i8* %23 to %Qubit** - %25 = load %Qubit*, %Qubit** %24, align 8 - %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %27 = bitcast i8* %26 to %Qubit** - %28 = load %Qubit*, %Qubit** %27, 
align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %25, %Qubit* %28) - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) - %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %30 = bitcast i8* %29 to %Qubit** - %31 = load %Qubit*, %Qubit** %30, align 8 - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %31) - %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) - %33 = bitcast i8* %32 to %Qubit** - %34 = load %Qubit*, %Qubit** %33, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %34, %Qubit* %qubit) - %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %36 = bitcast i8* %35 to %Qubit** - %37 = load %Qubit*, %Qubit** %36, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %qubit, %Qubit* %37) - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) - %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %39 = bitcast i8* %38 to %Qubit** - %40 = load %Qubit*, %Qubit** %39, align 8 - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %40) - %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) - %42 = bitcast i8* %41 to %Qubit** - %43 = load %Qubit*, %Qubit** %42, align 8 - %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %45 = bitcast i8* %44 to %Qubit** - %46 = load %Qubit*, %Qubit** %45, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %43, %Qubit* %46) - br label %continue__1 - -else__1: ; preds = %test2__1 - %47 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Z, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %47) - %48 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 
2)) - %49 = bitcast %Tuple* %48 to { %Array*, %Qubit* }* - %50 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %49, i32 0, i32 0 - %51 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %49, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Array* %ctls, %Array** %50, align 8 - store %Qubit* %qubit, %Qubit** %51, align 8 - call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %47, { %Array*, %Qubit* }* %49) - call void @__quantum__rt__capture_update_reference_count(%Callable* %47, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %47, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -define internal void @Microsoft__Quantum__Intrinsic__CCNOT__adj(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { -entry: - call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %control1 = load %Qubit*, %Qubit** %1, align 8 - %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %control2 = load %Qubit*, %Qubit** %2, align 8 - %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, 
%Qubit* }* %0, i32 0, i32 2 - %target = load %Qubit*, %Qubit** %3, align 8 - %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) - %6 = bitcast i8* %5 to %Qubit** - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 1) - %8 = bitcast i8* %7 to %Qubit** - store %Qubit* %control1, %Qubit** %6, align 8 - store %Qubit* %control2, %Qubit** %8, align 8 - %9 = call %Array* @__quantum__rt__array_concatenate(%Array* %ctls, %Array* %4) - call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %9, %Qubit* %target) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__x(%Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %3 = icmp eq i64 %2, 1 - br i1 %3, label %then1__1, label %test2__1 - -then1__1: ; preds = %test1__1 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %5 = bitcast i8* %4 to %Qubit** - %control = load %Qubit*, %Qubit** %5, align 8 - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %qubit) - br label %continue__1 - -test2__1: ; preds = %test1__1 - %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %7 = icmp eq i64 %6, 2 - br i1 %7, label %then2__1, label %else__1 - -then2__1: ; preds = %test2__1 - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, 
i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9, align 8 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12, align 8 - call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %10, %Qubit* %13, %Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test2__1 - %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - %15 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* - %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 - %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Array* %ctls, %Array** %17, align 8 - store %Qubit* %qubit, %Qubit** %18, align 8 - call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %14, { %Array*, %Qubit* }* %16) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) - -define internal void 
@Microsoft__Quantum__Intrinsic__CCNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %control1 = load %Qubit*, %Qubit** %1, align 8 - %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %control2 = load %Qubit*, %Qubit** %2, align 8 - %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 - %target = load %Qubit*, %Qubit** %3, align 8 - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) - %5 = bitcast %Tuple* %4 to { %Qubit*, %Qubit*, %Qubit* }* - %6 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 1 - %8 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 2 - store %Qubit* %control1, %Qubit** %6, align 8 - store %Qubit* %control2, %Qubit** %7, align 8 - store %Qubit* %target, %Qubit** %8, align 8 - call void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %5) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic___27e64f0afee94ef4bf9523108ce47367___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) - 
%1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 - %controls = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) - %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 - %arg = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 - %3 = bitcast { %Qubit*, %Qubit* }* %arg to %Tuple* - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) - %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) - %numControlPairs = sdiv i64 %numControls, 2 - %temps = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numControlPairs) - call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) - %4 = sub i64 %numControlPairs, 1 - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - %__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] - %5 = icmp sle i64 %__qsVar0__numPair__, %4 - br i1 %5, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %6 = mul i64 2, %__qsVar0__numPair__ - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - %10 = mul i64 2, %__qsVar0__numPair__ - %11 = add i64 %10, 1 - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) - %13 = bitcast i8* %12 to %Qubit** - %14 = load %Qubit*, %Qubit** %13, align 8 - %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) - %16 = bitcast i8* %15 to %Qubit** - %17 = load %Qubit*, %Qubit** %16, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %18 = add i64 %__qsVar0__numPair__, 1 - br label %header__1 - -exit__1: ; preds = 
%header__1 - %19 = srem i64 %numControls, 2 - %20 = icmp eq i64 %19, 0 - br i1 %20, label %condTrue__1, label %condFalse__1 - -condTrue__1: ; preds = %exit__1 - call void @__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) - br label %condContinue__1 - -condFalse__1: ; preds = %exit__1 - %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) - %23 = bitcast i8* %22 to %Qubit** - %24 = sub i64 %numControls, 1 - %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) - %26 = bitcast i8* %25 to %Qubit** - %27 = load %Qubit*, %Qubit** %26, align 8 - store %Qubit* %27, %Qubit** %23, align 8 - %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %21) - call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) - call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) - br label %condContinue__1 - -condContinue__1: ; preds = %condFalse__1, %condTrue__1 - %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %28, %condFalse__1 ] - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) - %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %30 = bitcast %Tuple* %29 to { %Array*, { %Qubit*, %Qubit* }* }* - %31 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %30, i32 0, i32 0 - %32 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %30, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) - store %Array* %__qsVar1__newControls__, %Array** %31, align 8 - 
store { %Qubit*, %Qubit* }* %arg, { %Qubit*, %Qubit* }** %32, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %29, %Tuple* null) - %33 = sub i64 %numControlPairs, 1 - %34 = sub i64 %33, 0 - %35 = sdiv i64 %34, 1 - %36 = mul i64 1, %35 - %37 = add i64 0, %36 - %38 = load %Range, %Range* @EmptyRange, align 4 - %39 = insertvalue %Range %38, i64 %37, 0 - %40 = insertvalue %Range %39, i64 -1, 1 - %41 = insertvalue %Range %40, i64 0, 2 - %42 = extractvalue %Range %41, 0 - %43 = extractvalue %Range %41, 1 - %44 = extractvalue %Range %41, 2 - br label %preheader__1 - -preheader__1: ; preds = %condContinue__1 - %45 = icmp sgt i64 %43, 0 - br label %header__2 - -header__2: ; preds = %exiting__2, %preheader__1 - %__qsVar0____qsVar0__numPair____ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] - %46 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %44 - %47 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %44 - %48 = select i1 %45, i1 %46, i1 %47 - br i1 %48, label %body__2, label %exit__2 - -body__2: ; preds = %header__2 - %49 = mul i64 2, %__qsVar0____qsVar0__numPair____ - %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) - %51 = bitcast i8* %50 to %Qubit** - %52 = load %Qubit*, %Qubit** %51, align 8 - %53 = mul i64 2, %__qsVar0____qsVar0__numPair____ - %54 = add i64 %53, 1 - %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) - %56 = bitcast i8* %55 to %Qubit** - %57 = load %Qubit*, %Qubit** %56, align 8 - %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) - %59 = bitcast i8* %58 to %Qubit** - %60 = load %Qubit*, %Qubit** %59, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) - br label %exiting__2 - -exiting__2: ; preds = %body__2 - %61 = add i64 %__qsVar0____qsVar0__numPair____, %43 - br label %header__2 - -exit__2: ; preds = %header__2 - call void 
@__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %temps) - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* - %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Qubit*, %Qubit** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* - %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Qubit*, %Qubit** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void 
@Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* - %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %3, { %Qubit*, %Qubit* }* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* - %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %3, { %Qubit*, %Qubit* }* %4) - ret void -} - -declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) - -declare void @__quantum__rt__callable_make_controlled(%Callable*) - -declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) - -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) - -define internal void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, %Qubit* }* %0) { -entry: - 
call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %controls = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %arg = load %Qubit*, %Qubit** %2, align 8 - %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) - %numControlPairs = sdiv i64 %numControls, 2 - %temps = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numControlPairs) - call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) - %3 = sub i64 %numControlPairs, 1 - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - %__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] - %4 = icmp sle i64 %__qsVar0__numPair__, %3 - br i1 %4, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %5 = mul i64 2, %__qsVar0__numPair__ - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %5) - %7 = bitcast i8* %6 to %Qubit** - %8 = load %Qubit*, %Qubit** %7, align 8 - %9 = mul i64 2, %__qsVar0__numPair__ - %10 = add i64 %9, 1 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %10) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12, align 8 - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) - %15 = bitcast i8* %14 to %Qubit** - %16 = load %Qubit*, %Qubit** %15, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %8, %Qubit* %13, %Qubit* %16) - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %17 = add i64 %__qsVar0__numPair__, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - %18 = srem i64 %numControls, 2 - %19 = icmp 
eq i64 %18, 0 - br i1 %19, label %condTrue__1, label %condFalse__1 - -condTrue__1: ; preds = %exit__1 - call void @__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) - br label %condContinue__1 - -condFalse__1: ; preds = %exit__1 - %20 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 0) - %22 = bitcast i8* %21 to %Qubit** - %23 = sub i64 %numControls, 1 - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %23) - %25 = bitcast i8* %24 to %Qubit** - %26 = load %Qubit*, %Qubit** %25, align 8 - store %Qubit* %26, %Qubit** %22, align 8 - %27 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %20) - call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 1) - call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) - br label %condContinue__1 - -condContinue__1: ; preds = %condFalse__1, %condTrue__1 - %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %27, %condFalse__1 ] - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) - %28 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %29 = bitcast %Tuple* %28 to { %Array*, %Qubit* }* - %30 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %29, i32 0, i32 0 - %31 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %29, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) - store %Array* %__qsVar1__newControls__, %Array** %30, align 8 - store %Qubit* %arg, %Qubit** %31, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %28, %Tuple* null) - %32 = sub i64 %numControlPairs, 1 - %33 = sub i64 %32, 0 - %34 = sdiv i64 
%33, 1 - %35 = mul i64 1, %34 - %36 = add i64 0, %35 - %37 = load %Range, %Range* @EmptyRange, align 4 - %38 = insertvalue %Range %37, i64 %36, 0 - %39 = insertvalue %Range %38, i64 -1, 1 - %40 = insertvalue %Range %39, i64 0, 2 - %41 = extractvalue %Range %40, 0 - %42 = extractvalue %Range %40, 1 - %43 = extractvalue %Range %40, 2 - br label %preheader__1 - -preheader__1: ; preds = %condContinue__1 - %44 = icmp sgt i64 %42, 0 - br label %header__2 - -header__2: ; preds = %exiting__2, %preheader__1 - %__qsVar0____qsVar0__numPair____ = phi i64 [ %41, %preheader__1 ], [ %60, %exiting__2 ] - %45 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %43 - %46 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %43 - %47 = select i1 %44, i1 %45, i1 %46 - br i1 %47, label %body__2, label %exit__2 - -body__2: ; preds = %header__2 - %48 = mul i64 2, %__qsVar0____qsVar0__numPair____ - %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %48) - %50 = bitcast i8* %49 to %Qubit** - %51 = load %Qubit*, %Qubit** %50, align 8 - %52 = mul i64 2, %__qsVar0____qsVar0__numPair____ - %53 = add i64 %52, 1 - %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %53) - %55 = bitcast i8* %54 to %Qubit** - %56 = load %Qubit*, %Qubit** %55, align 8 - %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) - %58 = bitcast i8* %57 to %Qubit** - %59 = load %Qubit*, %Qubit** %58, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %51, %Qubit* %56, %Qubit* %59) - br label %exiting__2 - -exiting__2: ; preds = %body__2 - %60 = add i64 %__qsVar0____qsVar0__numPair____, %42 - br label %header__2 - -exit__2: ; preds = %header__2 - call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* 
%__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %temps) - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, 
%Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) - ret void -} - -define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { -entry: - %0 = call %Result* @__quantum__qis__m__body(%Qubit* %qubit) - ret %Result* %0 -} - -declare %Result* @__quantum__qis__m__body(%Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %theta, %Qubit* %qubit) { -entry: - %0 = load i2, i2* @PauliX, align 1 - %1 = icmp eq i2 %pauli, %0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %2 = load i2, i2* @PauliY, align 1 - %3 = icmp eq i2 %pauli, %2 - br i1 %3, label %then1__1, label %test2__1 - -then1__1: ; preds = %test1__1 - call void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qubit) - br label %continue__1 - -test2__1: ; preds = %test1__1 - %4 = load i2, i2* @PauliZ, align 1 - %5 = icmp eq i2 %pauli, %4 - br i1 %5, label %then2__1, label %else__1 - -then2__1: ; preds = %test2__1 - call void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test2__1 - %6 = fneg double %theta - %7 = fdiv double %6, 2.000000e+00 - call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____body(double %7) - br label %continue__1 - -continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) 
{ -entry: - call void @__quantum__qis__rx(double %theta, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qubit) { -entry: - call void @__quantum__qis__ry(double %theta, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) { -entry: - call void @__quantum__qis__rz(double %theta, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %theta, %Qubit* %qubit) { -entry: - %0 = load i2, i2* @PauliX, align 1 - %1 = icmp eq i2 %pauli, %0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %2 = load i2, i2* @PauliY, align 1 - %3 = icmp eq i2 %pauli, %2 - br i1 %3, label %then1__1, label %test2__1 - -then1__1: ; preds = %test1__1 - call void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qubit) - br label %continue__1 - -test2__1: ; preds = %test1__1 - %4 = load i2, i2* @PauliZ, align 1 - %5 = icmp eq i2 %pauli, %4 - br i1 %5, label %then2__1, label %else__1 - -then2__1: ; preds = %test2__1 - call void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test2__1 - %6 = fneg double %theta - %7 = fdiv double %6, 2.000000e+00 - call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____adj(double %7) - br label %continue__1 - -continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qubit) { -entry: - %0 = fneg double %theta - call void @Microsoft__Quantum__Intrinsic__Rx__body(double %0, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qubit) { -entry: 
- %0 = fneg double %theta - call void @Microsoft__Quantum__Intrinsic__Ry__body(double %0, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qubit) { -entry: - %0 = fneg double %theta - call void @Microsoft__Quantum__Intrinsic__Rz__body(double %0, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 - %pauli = load i2, i2* %1, align 1 - %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 - %theta = load double, double* %2, align 8 - %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 - %qubit = load %Qubit*, %Qubit** %3, align 8 - %4 = load i2, i2* @PauliX, align 1 - %5 = icmp eq i2 %pauli, %4 - br i1 %5, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %7 = bitcast %Tuple* %6 to { double, %Qubit* }* - %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 - store double %theta, double* %8, align 8 - store %Qubit* %qubit, %Qubit** %9, align 8 - call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %7) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) - br label %continue__1 - -test1__1: ; preds = %entry - %10 = load i2, i2* @PauliY, align 1 - %11 = icmp eq i2 %pauli, %10 - br i1 %11, label %then1__1, label %test2__1 - -then1__1: ; preds = %test1__1 - %12 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %13 = bitcast %Tuple* %12 to { double, %Qubit* }* - %14 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 1 - store double %theta, double* %14, align 8 - store %Qubit* %qubit, %Qubit** %15, align 8 - call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %13) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - br label %continue__1 - -test2__1: ; preds = %test1__1 - %16 = load i2, i2* @PauliZ, align 1 - %17 = icmp eq i2 %pauli, %16 - br i1 %17, label %then2__1, label %else__1 - -then2__1: ; preds = %test2__1 - %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %19 = bitcast %Tuple* %18 to { double, %Qubit* }* - %20 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 0 - %21 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 1 - store double %theta, double* %20, align 8 - store %Qubit* %qubit, %Qubit** %21, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %19) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) - br label %continue__1 - -else__1: ; preds = %test2__1 - %22 = fneg double %theta - %23 = fdiv double %22, 2.000000e+00 - call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctl(%Array* %__controlQubits__, double %23) - br label %continue__1 - -continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - ret void -} - -define internal void 
@Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %theta = load double, double* %1, align 8 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %qubit = load %Qubit*, %Qubit** %2, align 8 - %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %4 = icmp eq i64 %3, 0 - br i1 %4, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__rx(double %theta, %Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %6 = icmp eq i64 %5, 1 - br i1 %6, label %then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - %7 = load i2, i2* @PauliZ, align 1 - %8 = load i2, i2* @PauliX, align 1 - call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %7, i2 %8) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %10 = bitcast %Tuple* %9 to { double, %Qubit* }* - %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 - store double %theta, double* %11, align 8 - store %Qubit* %qubit, %Qubit** %12, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %10) - %13 = load i2, i2* @PauliZ, align 1 - %14 = load i2, i2* @PauliX, align 1 - call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %13, i2 %14) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) - br label %continue__1 - -else__1: ; preds = %test1__1 - %15 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rx, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %15) - %16 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %17 = bitcast %Tuple* %16 to { %Array*, { double, %Qubit* }* }* - %18 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 0 - %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %21 = bitcast %Tuple* %20 to { double, %Qubit* }* - %22 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 0 - %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 1 - store double %theta, double* %22, align 8 - store %Qubit* %qubit, %Qubit** %23, align 8 - store %Array* %ctls, %Array** %18, align 8 - store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 - call void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) - call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then1__1, 
%then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %theta = load double, double* %1, align 8 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %qubit = load %Qubit*, %Qubit** %2, align 8 - %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %4 = icmp eq i64 %3, 0 - br i1 %4, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__ry(double %theta, %Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %6 = icmp eq i64 %5, 1 - br i1 %6, label %then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - %7 = load i2, i2* @PauliZ, align 1 - %8 = load i2, i2* @PauliY, align 1 - call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %7, i2 %8) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %10 = bitcast %Tuple* %9 to { double, %Qubit* }* - %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 - store double %theta, double* %11, align 8 - store %Qubit* %qubit, %Qubit** %12, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %10) - %13 = load i2, i2* @PauliZ, align 1 - %14 = load i2, i2* @PauliY, align 1 - call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %13, i2 %14) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) - br label %continue__1 - -else__1: ; preds = %test1__1 - %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Ry, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %15) - %16 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %17 = bitcast %Tuple* %16 to { %Array*, { double, %Qubit* }* }* - %18 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 0 - %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %21 = bitcast %Tuple* %20 to { double, %Qubit* }* - %22 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 0 - %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 1 - store double %theta, double* %22, align 8 - store %Qubit* %qubit, %Qubit** %23, align 8 - store %Array* %ctls, %Array** %18, align 8 - store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 - call void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) - call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) - 
call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %theta = load double, double* %1, align 8 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %qubit = load %Qubit*, %Qubit** %2, align 8 - %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %4 = icmp eq i64 %3, 0 - br i1 %4, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %6 = icmp eq i64 %5, 1 - br i1 %6, label %then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - %7 = fdiv double %theta, 2.000000e+00 - call void @Microsoft__Quantum__Intrinsic__Rz__body(double %7, %Qubit* %qubit) - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %qubit) - %11 = fneg double %theta - %12 = fdiv double %11, 2.000000e+00 - call void @Microsoft__Quantum__Intrinsic__Rz__body(double %12, %Qubit* %qubit) - %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %14 = bitcast i8* %13 to %Qubit** - %15 = load %Qubit*, %Qubit** %14, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %15, %Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test1__1 - %16 = call 
%Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rz, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %16) - %17 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %18 = bitcast %Tuple* %17 to { %Array*, { double, %Qubit* }* }* - %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %18, i32 0, i32 0 - %20 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %18, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %22 = bitcast %Tuple* %21 to { double, %Qubit* }* - %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %22, i32 0, i32 0 - %24 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %22, i32 0, i32 1 - store double %theta, double* %23, align 8 - store %Qubit* %qubit, %Qubit** %24, align 8 - store %Array* %ctls, %Array** %19, align 8 - store { double, %Qubit* }* %22, { double, %Qubit* }** %20, align 8 - call void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____body(%Callable* %16, { %Array*, { double, %Qubit* }* }* %18) - call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, 
%then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 - %pauli = load i2, i2* %1, align 1 - %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 - %theta = load double, double* %2, align 8 - %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 - %qubit = load %Qubit*, %Qubit** %3, align 8 - %4 = load i2, i2* @PauliX, align 1 - %5 = icmp eq i2 %pauli, %4 - br i1 %5, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %7 = bitcast %Tuple* %6 to { double, %Qubit* }* - %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 - store double %theta, double* %8, align 8 - store %Qubit* %qubit, %Qubit** %9, align 8 - call void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %7) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) - br label %continue__1 - -test1__1: ; preds = %entry - %10 = load i2, i2* @PauliY, align 1 - %11 = icmp eq i2 %pauli, %10 - br i1 %11, label %then1__1, label %test2__1 - -then1__1: ; preds = %test1__1 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %13 = bitcast %Tuple* %12 to { double, %Qubit* }* - %14 = getelementptr 
inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 1 - store double %theta, double* %14, align 8 - store %Qubit* %qubit, %Qubit** %15, align 8 - call void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %13) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - br label %continue__1 - -test2__1: ; preds = %test1__1 - %16 = load i2, i2* @PauliZ, align 1 - %17 = icmp eq i2 %pauli, %16 - br i1 %17, label %then2__1, label %else__1 - -then2__1: ; preds = %test2__1 - %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %19 = bitcast %Tuple* %18 to { double, %Qubit* }* - %20 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 0 - %21 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 1 - store double %theta, double* %20, align 8 - store %Qubit* %qubit, %Qubit** %21, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %19) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) - br label %continue__1 - -else__1: ; preds = %test2__1 - %22 = fneg double %theta - %23 = fdiv double %22, 2.000000e+00 - call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctladj(%Array* %__controlQubits__, double %23) - br label %continue__1 - -continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr 
inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %theta = load double, double* %1, align 8 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %qubit = load %Qubit*, %Qubit** %2, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %4 = bitcast %Tuple* %3 to { double, %Qubit* }* - %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 - %7 = fneg double %theta - store double %7, double* %5, align 8 - store %Qubit* %qubit, %Qubit** %6, align 8 - call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %theta = load double, double* %1, align 8 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %qubit = load %Qubit*, %Qubit** %2, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %4 = bitcast %Tuple* %3 to { double, %Qubit* }* - %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 - %7 = fneg double %theta - store double %7, double* %5, align 8 - store %Qubit* %qubit, 
%Qubit** %6, align 8 - call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %theta = load double, double* %1, align 8 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %qubit = load %Qubit*, %Qubit** %2, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) - %4 = bitcast %Tuple* %3 to { double, %Qubit* }* - %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 - %7 = fneg double %theta - store double %7, double* %5, align 8 - store %Qubit* %qubit, %Qubit** %6, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R1__body(double %theta, %Qubit* %qubit) { -entry: - %0 = load i2, i2* @PauliZ, align 1 - call void @Microsoft__Quantum__Intrinsic__R__body(i2 %0, double %theta, %Qubit* %qubit) - %1 = load i2, i2* @PauliI, align 1 - %2 = fneg double %theta - call void @Microsoft__Quantum__Intrinsic__R__body(i2 %1, double %2, %Qubit* %qubit) - ret void -} - -define internal void 
@Microsoft__Quantum__Intrinsic__R1__adj(double %theta, %Qubit* %qubit) { -entry: - %0 = load i2, i2* @PauliI, align 1 - %1 = fneg double %theta - call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %0, double %1, %Qubit* %qubit) - %2 = load i2, i2* @PauliZ, align 1 - call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %2, double %theta, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 %numerator, i64 %power, %Qubit* %qubit) { -entry: - %0 = load i2, i2* @PauliZ, align 1 - %1 = sub i64 0, %numerator - %2 = add i64 %power, 1 - call void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %0, i64 %1, i64 %2, %Qubit* %qubit) - %3 = load i2, i2* @PauliI, align 1 - %4 = add i64 %power, 1 - call void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %3, i64 %numerator, i64 %4, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %pauli, i64 %numerator, i64 %power, %Qubit* %qubit) { -entry: - %0 = call double @Microsoft__Quantum__Math__PI__body() - %1 = fmul double -2.000000e+00, %0 - %2 = sitofp i64 %numerator to double - %3 = fmul double %1, %2 - %4 = sitofp i64 %power to double - %5 = call double @llvm.pow.f64(double 2.000000e+00, double %4) - %angle = fdiv double %3, %5 - call void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %angle, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 %numerator, i64 %power, %Qubit* %qubit) { -entry: - %0 = load i2, i2* @PauliI, align 1 - %1 = add i64 %power, 1 - call void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %0, i64 %numerator, i64 %1, %Qubit* %qubit) - %2 = load i2, i2* @PauliZ, align 1 - %3 = sub i64 0, %numerator - %4 = add i64 %power, 1 - call void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %2, i64 %3, i64 %4, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %pauli, i64 %numerator, i64 %power, %Qubit* %qubit) { 
-entry: - %0 = call double @Microsoft__Quantum__Math__PI__body() - %1 = fmul double -2.000000e+00, %0 - %2 = sitofp i64 %numerator to double - %3 = fmul double %1, %2 - %4 = sitofp i64 %power to double - %5 = call double @llvm.pow.f64(double 2.000000e+00, double %4) - %__qsVar0__angle__ = fdiv double %3, %5 - call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %__qsVar0__angle__, %Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R1Frac__ctl(%Array* %__controlQubits__, { i64, i64, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 0 - %numerator = load i64, i64* %1, align 4 - %2 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 1 - %power = load i64, i64* %2, align 4 - %3 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 2 - %qubit = load %Qubit*, %Qubit** %3, align 8 - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) - %5 = bitcast %Tuple* %4 to { i2, i64, i64, %Qubit* }* - %6 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 1 - %8 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 2 - %9 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 3 - %10 = load i2, i2* @PauliZ, align 1 - %11 = sub i64 0, %numerator - %12 = add i64 %power, 1 - store i2 %10, i2* %6, align 1 - store i64 %11, i64* %7, align 4 - store i64 %12, i64* %8, align 4 - store %Qubit* %qubit, %Qubit** %9, align 8 - call void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* 
%__controlQubits__, { i2, i64, i64, %Qubit* }* %5) - %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) - %14 = bitcast %Tuple* %13 to { i2, i64, i64, %Qubit* }* - %15 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 0 - %16 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 1 - %17 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 2 - %18 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 3 - %19 = load i2, i2* @PauliI, align 1 - %20 = add i64 %power, 1 - store i2 %19, i2* %15, align 1 - store i64 %numerator, i64* %16, align 4 - store i64 %20, i64* %17, align 4 - store %Qubit* %qubit, %Qubit** %18, align 8 - call void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %14) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 0 - %pauli = load i2, i2* %1, align 1 - %2 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 1 - %numerator = load i64, i64* %2, align 4 - %3 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 2 - %power = load i64, i64* %3, align 4 - %4 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { 
i2, i64, i64, %Qubit* }* %0, i32 0, i32 3 - %qubit = load %Qubit*, %Qubit** %4, align 8 - %5 = call double @Microsoft__Quantum__Math__PI__body() - %6 = fmul double -2.000000e+00, %5 - %7 = sitofp i64 %numerator to double - %8 = fmul double %6, %7 - %9 = sitofp i64 %power to double - %10 = call double @llvm.pow.f64(double 2.000000e+00, double %9) - %angle = fdiv double %8, %10 - %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) - %12 = bitcast %Tuple* %11 to { i2, double, %Qubit* }* - %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 0 - %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 1 - %15 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 2 - store i2 %pauli, i2* %13, align 1 - store double %angle, double* %14, align 8 - store %Qubit* %qubit, %Qubit** %15, align 8 - call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %12) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__R1Frac__ctladj(%Array* %__controlQubits__, { i64, i64, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 0 - %numerator = load i64, i64* %1, align 4 - %2 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 1 - %power = load i64, i64* %2, align 4 - %3 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 2 - %qubit = load %Qubit*, %Qubit** %3, align 8 - %4 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) - %5 = bitcast %Tuple* %4 to { i2, i64, i64, %Qubit* }* - %6 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 1 - %8 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 2 - %9 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 3 - %10 = load i2, i2* @PauliI, align 1 - %11 = add i64 %power, 1 - store i2 %10, i2* %6, align 1 - store i64 %numerator, i64* %7, align 4 - store i64 %11, i64* %8, align 4 - store %Qubit* %qubit, %Qubit** %9, align 8 - call void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %5) - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) - %13 = bitcast %Tuple* %12 to { i2, i64, i64, %Qubit* }* - %14 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 1 - %16 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 2 - %17 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 3 - %18 = load i2, i2* @PauliZ, align 1 - %19 = sub i64 0, %numerator - %20 = add i64 %power, 1 - store i2 %18, i2* %14, align 1 - store i64 %19, i64* %15, align 4 - store i64 %20, i64* %16, align 4 - store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %13) - call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - %1 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 0 - %pauli = load i2, i2* %1, align 1 - %2 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 1 - %numerator = load i64, i64* %2, align 4 - %3 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 2 - %power = load i64, i64* %3, align 4 - %4 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 3 - %qubit = load %Qubit*, %Qubit** %4, align 8 - %5 = call double @Microsoft__Quantum__Math__PI__body() - %6 = fmul double -2.000000e+00, %5 - %7 = sitofp i64 %numerator to double - %8 = fmul double %6, %7 - %9 = sitofp i64 %power to double - %10 = call double @llvm.pow.f64(double 2.000000e+00, double %9) - %__qsVar0__angle__ = fdiv double %8, %10 - %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) - %12 = bitcast %Tuple* %11 to { i2, double, %Qubit* }* - %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 0 - %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 1 - %15 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 2 - store i2 %pauli, i2* %13, align 1 - store double %__qsVar0__angle__, double* %14, align 8 - store %Qubit* %qubit, %Qubit** 
%15, align 8 - call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %12) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__reset__body(%Qubit* %qubit) - ret void -} - -declare void @__quantum__qis__reset__body(%Qubit*) - -; Function Attrs: nounwind readnone speculatable willreturn -declare double @llvm.pow.f64(double, double) #0 - -declare void @__quantum__qis__rx(double, %Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) - %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 - %controls = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) - %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 - %arg = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 - %3 = bitcast { double, %Qubit* }* %arg to %Tuple* - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) - %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) - %numControlPairs = sdiv i64 %numControls, 2 - %temps = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numControlPairs) - call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) - %4 = sub i64 %numControlPairs, 1 - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - 
%__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] - %5 = icmp sle i64 %__qsVar0__numPair__, %4 - br i1 %5, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %6 = mul i64 2, %__qsVar0__numPair__ - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - %10 = mul i64 2, %__qsVar0__numPair__ - %11 = add i64 %10, 1 - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) - %13 = bitcast i8* %12 to %Qubit** - %14 = load %Qubit*, %Qubit** %13, align 8 - %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) - %16 = bitcast i8* %15 to %Qubit** - %17 = load %Qubit*, %Qubit** %16, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %18 = add i64 %__qsVar0__numPair__, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - %19 = srem i64 %numControls, 2 - %20 = icmp eq i64 %19, 0 - br i1 %20, label %condTrue__1, label %condFalse__1 - -condTrue__1: ; preds = %exit__1 - call void @__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) - br label %condContinue__1 - -condFalse__1: ; preds = %exit__1 - %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) - %23 = bitcast i8* %22 to %Qubit** - %24 = sub i64 %numControls, 1 - %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) - %26 = bitcast i8* %25 to %Qubit** - %27 = load %Qubit*, %Qubit** %26, align 8 - store %Qubit* %27, %Qubit** %23, align 8 - %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %21) - call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) - call void @__quantum__rt__array_update_reference_count(%Array* 
%21, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) - br label %condContinue__1 - -condContinue__1: ; preds = %condFalse__1, %condTrue__1 - %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %28, %condFalse__1 ] - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) - %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %30 = bitcast %Tuple* %29 to { %Array*, { double, %Qubit* }* }* - %31 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %30, i32 0, i32 0 - %32 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %30, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) - store %Array* %__qsVar1__newControls__, %Array** %31, align 8 - store { double, %Qubit* }* %arg, { double, %Qubit* }** %32, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %29, %Tuple* null) - %33 = sub i64 %numControlPairs, 1 - %34 = sub i64 %33, 0 - %35 = sdiv i64 %34, 1 - %36 = mul i64 1, %35 - %37 = add i64 0, %36 - %38 = load %Range, %Range* @EmptyRange, align 4 - %39 = insertvalue %Range %38, i64 %37, 0 - %40 = insertvalue %Range %39, i64 -1, 1 - %41 = insertvalue %Range %40, i64 0, 2 - %42 = extractvalue %Range %41, 0 - %43 = extractvalue %Range %41, 1 - %44 = extractvalue %Range %41, 2 - br label %preheader__1 - -preheader__1: ; preds = %condContinue__1 - %45 = icmp sgt i64 %43, 0 - br label %header__2 - -header__2: ; preds = %exiting__2, %preheader__1 - %__qsVar0____qsVar0__numPair____ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] - %46 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %44 - %47 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %44 - %48 = select 
i1 %45, i1 %46, i1 %47 - br i1 %48, label %body__2, label %exit__2 - -body__2: ; preds = %header__2 - %49 = mul i64 2, %__qsVar0____qsVar0__numPair____ - %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) - %51 = bitcast i8* %50 to %Qubit** - %52 = load %Qubit*, %Qubit** %51, align 8 - %53 = mul i64 2, %__qsVar0____qsVar0__numPair____ - %54 = add i64 %53, 1 - %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) - %56 = bitcast i8* %55 to %Qubit** - %57 = load %Qubit*, %Qubit** %56, align 8 - %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) - %59 = bitcast i8* %58 to %Qubit** - %60 = load %Qubit*, %Qubit** %59, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) - br label %exiting__2 - -exiting__2: ; preds = %body__2 - %61 = add i64 %__qsVar0____qsVar0__numPair____, %43 - br label %header__2 - -exit__2: ; preds = %header__2 - call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %temps) - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void 
@Microsoft__Quantum__Intrinsic__Rx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %3 = load double, double* %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Rx__body(double %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rx__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %3 = load double, double* %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rx__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* - %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %3, { double, %Qubit* }* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* - %1 = 
getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %3, { double, %Qubit* }* %4) - ret void -} - -declare void @__quantum__qis__ry(double, %Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic__Ry__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %3 = load double, double* %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Ry__body(double %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Ry__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %3 = load double, double* %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Ry__adj(double %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Ry__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* - %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { 
double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %3, { double, %Qubit* }* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Ry__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* - %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %3, { double, %Qubit* }* %4) - ret void -} - -declare void @__quantum__qis__rz(double, %Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic__Rz__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %3 = load double, double* %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__body(double %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rz__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* - %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 - %3 = load double, double* %1, align 8 
- %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__adj(double %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rz__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* - %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %3, { double, %Qubit* }* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Rz__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* - %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %3, { double, %Qubit* }* %4) - ret void -} - -declare void @__quantum__qis__s(%Qubit*) - -declare void @__quantum__qis__sadj(%Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__s(%Qubit* %qubit) - br label %continue__1 - -test1__1: ; 
preds = %entry - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %3 = icmp eq i64 %2, 1 - br i1 %3, label %then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %5 = bitcast i8* %4 to %Qubit** - %6 = load %Qubit*, %Qubit** %5, align 8 - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %6) - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) - %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %11 = bitcast i8* %10 to %Qubit** - %12 = load %Qubit*, %Qubit** %11, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test1__1 - %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* - %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 - %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Array* %ctls, %Array** %16, align 8 - store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* 
%13, { %Array*, %Qubit* }* %15) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, 
%Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__sadj(%Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %3 = icmp eq i64 %2, 1 - br i1 %3, label %then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %5 = bitcast i8* %4 to %Qubit** - %6 = load %Qubit*, %Qubit** %5, align 8 - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %6) - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) - %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %11 = bitcast i8* %10 to %Qubit** - %12 = load %Qubit*, %Qubit** %11, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test1__1 - %13 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_adjoint(%Callable* %13) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* - %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 - %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Array* %ctls, %Array** %16, align 8 - store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -declare void @__quantum__rt__callable_make_adjoint(%Callable*) - -declare void @__quantum__qis__t(%Qubit*) - -declare void @__quantum__qis__tadj(%Qubit*) - -define internal void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - 
call void @__quantum__qis__t(%Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %3 = icmp eq i64 %2, 1 - br i1 %3, label %then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %5 = bitcast i8* %4 to %Qubit** - %6 = load %Qubit*, %Qubit** %5, align 8 - call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %6) - call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %qubit) - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %qubit) - %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %11 = bitcast i8* %10 to %Qubit** - %12 = load %Qubit*, %Qubit** %11, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test1__1 - %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__T, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* - %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 - %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Array* %ctls, %Array** %16, align 8 - store %Qubit* %qubit, %Qubit** 
%17, align 8 - call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__T__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__T__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__T__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %3, 
%Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__T__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 0 - br i1 %1, label %then0__1, label %test1__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__tadj(%Qubit* %qubit) - br label %continue__1 - -test1__1: ; preds = %entry - %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %3 = icmp eq i64 %2, 1 - br i1 %3, label %then1__1, label %else__1 - -then1__1: ; preds = %test1__1 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %5 = bitcast i8* %4 to %Qubit** - %6 = load %Qubit*, %Qubit** %5, align 8 - call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %6) - call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %qubit) - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) - call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %qubit) - %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %11 = bitcast i8* %10 to %Qubit** - %12 = load %Qubit*, 
%Qubit** %11, align 8 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) - br label %continue__1 - -else__1: ; preds = %test1__1 - %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__T, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) - call void @__quantum__rt__callable_make_adjoint(%Callable* %13) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* - %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 - %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Array* %ctls, %Array** %16, align 8 - store %Qubit* %qubit, %Qubit** %17, align 8 - call void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) - br label %continue__1 - -continue__1: ; preds = %else__1, %then1__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { -entry: - call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { 
-entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - call void 
@Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { -entry: - call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Z__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Z__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1, align 8 - call void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %2) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, 
i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Qubit*, %Qubit** %2, align 8 - call void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %3, %Qubit* %4) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) - call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) - ret void -} - -declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) - -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) - -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) - -define internal void @Microsoft__Quantum__Intrinsic___8fb41246696c4c40aa9fa6f5871a34a7___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %controls = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %arg = load %Qubit*, %Qubit** %2, align 8 - %__qsVar0__numControls__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) - %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 - %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 1) - %3 = sub i64 
%__qsVar1__numControlPairs__, 1 - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] - %4 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %3 - br i1 %4, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %5 = mul i64 2, %__qsVar0____qsVar3__numPair____ - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %5) - %7 = bitcast i8* %6 to %Qubit** - %8 = load %Qubit*, %Qubit** %7, align 8 - %9 = mul i64 2, %__qsVar0____qsVar3__numPair____ - %10 = add i64 %9, 1 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %10) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12, align 8 - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) - %15 = bitcast i8* %14 to %Qubit** - %16 = load %Qubit*, %Qubit** %15, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %8, %Qubit* %13, %Qubit* %16) - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %17 = add i64 %__qsVar0____qsVar3__numPair____, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - %18 = srem i64 %__qsVar0__numControls__, 2 - %19 = icmp eq i64 %18, 0 - br i1 %19, label %condTrue__1, label %condFalse__1 - -condTrue__1: ; preds = %exit__1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) - br label %condContinue__1 - -condFalse__1: ; preds = %exit__1 - %20 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 0) - %22 = bitcast i8* %21 to %Qubit** - %23 = sub i64 %__qsVar0__numControls__, 1 - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %23) - %25 = bitcast i8* %24 to %Qubit** - %26 = load %Qubit*, %Qubit** %25, align 8 - store %Qubit* %26, %Qubit** %22, align 8 - %27 = 
call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar2__temps__, %Array* %20) - call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 1) - call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) - br label %condContinue__1 - -condContinue__1: ; preds = %condFalse__1, %condTrue__1 - %__qsVar1____qsVar4__newControls____ = phi %Array* [ %__qsVar2__temps__, %condTrue__1 ], [ %27, %condFalse__1 ] - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) - %28 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %28) - %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %30 = bitcast %Tuple* %29 to { %Array*, %Qubit* }* - %31 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %30, i32 0, i32 0 - %32 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %30, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) - store %Array* %__qsVar1____qsVar4__newControls____, %Array** %31, align 8 - store %Qubit* %arg, %Qubit** %32, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %28, %Tuple* %29, %Tuple* null) - %33 = sub i64 %__qsVar1__numControlPairs__, 1 - %34 = sub i64 %33, 0 - %35 = sdiv i64 %34, 1 - %36 = mul i64 1, %35 - %37 = add i64 0, %36 - %38 = load %Range, %Range* @EmptyRange, align 4 - %39 = insertvalue %Range %38, i64 %37, 0 - %40 = insertvalue %Range %39, i64 -1, 1 - %41 = insertvalue %Range %40, i64 0, 2 - %42 = extractvalue %Range %41, 0 - %43 = extractvalue %Range %41, 1 - %44 = extractvalue %Range %41, 2 - br label %preheader__1 - 
-preheader__1: ; preds = %condContinue__1 - %45 = icmp sgt i64 %43, 0 - br label %header__2 - -header__2: ; preds = %exiting__2, %preheader__1 - %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] - %46 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 - %47 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 - %48 = select i1 %45, i1 %46, i1 %47 - br i1 %48, label %body__2, label %exit__2 - -body__2: ; preds = %header__2 - %49 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ - %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) - %51 = bitcast i8* %50 to %Qubit** - %52 = load %Qubit*, %Qubit** %51, align 8 - %53 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ - %54 = add i64 %53, 1 - %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) - %56 = bitcast i8* %55 to %Qubit** - %57 = load %Qubit*, %Qubit** %56, align 8 - %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) - %59 = bitcast i8* %58 to %Qubit** - %60 = load %Qubit*, %Qubit** %59, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) - br label %exiting__2 - -exiting__2: ; preds = %body__2 - %61 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %43 - br label %header__2 - -exit__2: ; preds = %header__2 - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) - call void 
@__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) - ret void -} - -declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) - -declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) - -define internal void @Microsoft__Quantum__Intrinsic___7f72c45e20854241afccc66f6e99a31b___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) - %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 - %controls = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) - %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 - %arg = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 - %3 = bitcast { double, %Qubit* }* %arg to %Tuple* - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) - %__qsVar0__numControls__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) - %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 - %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 1) - %4 = sub i64 %__qsVar1__numControlPairs__, 1 - br label %header__1 - -header__1: ; 
preds = %exiting__1, %entry - %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] - %5 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %4 - br i1 %5, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %6 = mul i64 2, %__qsVar0____qsVar3__numPair____ - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - %10 = mul i64 2, %__qsVar0____qsVar3__numPair____ - %11 = add i64 %10, 1 - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) - %13 = bitcast i8* %12 to %Qubit** - %14 = load %Qubit*, %Qubit** %13, align 8 - %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) - %16 = bitcast i8* %15 to %Qubit** - %17 = load %Qubit*, %Qubit** %16, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %18 = add i64 %__qsVar0____qsVar3__numPair____, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - %19 = srem i64 %__qsVar0__numControls__, 2 - %20 = icmp eq i64 %19, 0 - br i1 %20, label %condTrue__1, label %condFalse__1 - -condTrue__1: ; preds = %exit__1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) - br label %condContinue__1 - -condFalse__1: ; preds = %exit__1 - %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) - %23 = bitcast i8* %22 to %Qubit** - %24 = sub i64 %__qsVar0__numControls__, 1 - %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) - %26 = bitcast i8* %25 to %Qubit** - %27 = load %Qubit*, %Qubit** %26, align 8 - store %Qubit* %27, %Qubit** %23, align 8 - %28 = call %Array* @__quantum__rt__array_concatenate(%Array* 
%__qsVar2__temps__, %Array* %21) - call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) - call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) - br label %condContinue__1 - -condContinue__1: ; preds = %condFalse__1, %condTrue__1 - %__qsVar1____qsVar4__newControls____ = phi %Array* [ %__qsVar2__temps__, %condTrue__1 ], [ %28, %condFalse__1 ] - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) - %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %29) - %30 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %31 = bitcast %Tuple* %30 to { %Array*, { double, %Qubit* }* }* - %32 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %31, i32 0, i32 0 - %33 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %31, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) - store %Array* %__qsVar1____qsVar4__newControls____, %Array** %32, align 8 - store { double, %Qubit* }* %arg, { double, %Qubit* }** %33, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) - %34 = sub i64 %__qsVar1__numControlPairs__, 1 - %35 = sub i64 %34, 0 - %36 = sdiv i64 %35, 1 - %37 = mul i64 1, %36 - %38 = add i64 0, %37 - %39 = load %Range, %Range* @EmptyRange, align 4 - %40 = insertvalue %Range %39, i64 %38, 0 - %41 = insertvalue %Range %40, i64 -1, 1 - %42 = insertvalue %Range %41, i64 0, 2 - %43 = extractvalue 
%Range %42, 0 - %44 = extractvalue %Range %42, 1 - %45 = extractvalue %Range %42, 2 - br label %preheader__1 - -preheader__1: ; preds = %condContinue__1 - %46 = icmp sgt i64 %44, 0 - br label %header__2 - -header__2: ; preds = %exiting__2, %preheader__1 - %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %43, %preheader__1 ], [ %62, %exiting__2 ] - %47 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 - %48 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 - %49 = select i1 %46, i1 %47, i1 %48 - br i1 %49, label %body__2, label %exit__2 - -body__2: ; preds = %header__2 - %50 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ - %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %50) - %52 = bitcast i8* %51 to %Qubit** - %53 = load %Qubit*, %Qubit** %52, align 8 - %54 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ - %55 = add i64 %54, 1 - %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %55) - %57 = bitcast i8* %56 to %Qubit** - %58 = load %Qubit*, %Qubit** %57, align 8 - %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) - %60 = bitcast i8* %59 to %Qubit** - %61 = load %Qubit*, %Qubit** %60, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %53, %Qubit* %58, %Qubit* %61) - br label %exiting__2 - -exiting__2: ; preds = %body__2 - %62 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 - br label %header__2 - -exit__2: ; preds = %header__2 - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) - call 
void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__Intrinsic___27e64f0afee94ef4bf9523108ce47367___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) - %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 - %controls = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) - %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 - %arg = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 - %3 = bitcast { %Qubit*, %Qubit* }* %arg to %Tuple* - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) - %__qsVar0__numControls__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) - %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 - %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) - call void @__quantum__rt__array_update_alias_count(%Array* 
%__qsVar2__temps__, i32 1) - %4 = sub i64 %__qsVar1__numControlPairs__, 1 - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] - %5 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %4 - br i1 %5, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %6 = mul i64 2, %__qsVar0____qsVar3__numPair____ - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8, align 8 - %10 = mul i64 2, %__qsVar0____qsVar3__numPair____ - %11 = add i64 %10, 1 - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) - %13 = bitcast i8* %12 to %Qubit** - %14 = load %Qubit*, %Qubit** %13, align 8 - %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) - %16 = bitcast i8* %15 to %Qubit** - %17 = load %Qubit*, %Qubit** %16, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %18 = add i64 %__qsVar0____qsVar3__numPair____, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - %19 = srem i64 %__qsVar0__numControls__, 2 - %20 = icmp eq i64 %19, 0 - br i1 %20, label %condTrue__1, label %condFalse__1 - -condTrue__1: ; preds = %exit__1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) - br label %condContinue__1 - -condFalse__1: ; preds = %exit__1 - %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) - %23 = bitcast i8* %22 to %Qubit** - %24 = sub i64 %__qsVar0__numControls__, 1 - %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) - %26 = bitcast i8* %25 to %Qubit** - %27 = load %Qubit*, %Qubit** %26, align 8 - store 
%Qubit* %27, %Qubit** %23, align 8 - %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar2__temps__, %Array* %21) - call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) - call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) - br label %condContinue__1 - -condContinue__1: ; preds = %condFalse__1, %condTrue__1 - %__qsVar1____qsVar4__newControls____ = phi %Array* [ %__qsVar2__temps__, %condTrue__1 ], [ %28, %condFalse__1 ] - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) - %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %29) - %30 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %31 = bitcast %Tuple* %30 to { %Array*, { %Qubit*, %Qubit* }* }* - %32 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %31, i32 0, i32 0 - %33 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %31, i32 0, i32 1 - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) - store %Array* %__qsVar1____qsVar4__newControls____, %Array** %32, align 8 - store { %Qubit*, %Qubit* }* %arg, { %Qubit*, %Qubit* }** %33, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) - %34 = sub i64 %__qsVar1__numControlPairs__, 1 - %35 = sub i64 %34, 0 - %36 = sdiv i64 %35, 1 - %37 = mul i64 1, %36 - %38 = add i64 0, %37 - %39 = load %Range, %Range* @EmptyRange, align 4 - %40 = insertvalue %Range %39, i64 %38, 0 
- %41 = insertvalue %Range %40, i64 -1, 1 - %42 = insertvalue %Range %41, i64 0, 2 - %43 = extractvalue %Range %42, 0 - %44 = extractvalue %Range %42, 1 - %45 = extractvalue %Range %42, 2 - br label %preheader__1 - -preheader__1: ; preds = %condContinue__1 - %46 = icmp sgt i64 %44, 0 - br label %header__2 - -header__2: ; preds = %exiting__2, %preheader__1 - %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %43, %preheader__1 ], [ %62, %exiting__2 ] - %47 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 - %48 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 - %49 = select i1 %46, i1 %47, i1 %48 - br i1 %49, label %body__2, label %exit__2 - -body__2: ; preds = %header__2 - %50 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ - %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %50) - %52 = bitcast i8* %51 to %Qubit** - %53 = load %Qubit*, %Qubit** %52, align 8 - %54 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ - %55 = add i64 %54, 1 - %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %55) - %57 = bitcast i8* %56 to %Qubit** - %58 = load %Qubit*, %Qubit** %57, align 8 - %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) - %60 = bitcast i8* %59 to %Qubit** - %61 = load %Qubit*, %Qubit** %60, align 8 - call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %53, %Qubit* %58, %Qubit* %61) - br label %exiting__2 - -exiting__2: ; preds = %body__2 - %62 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 - br label %header__2 - -exit__2: ; preds = %header__2 - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* 
%__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) - call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) - ret void -} - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() #1 { -entry: - call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - ret void -} - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() #2 { -entry: - call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @2, i32 0, i32 0)) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) - -attributes #0 = { nounwind readnone speculatable willreturn } -attributes #1 = { "InteropFriendly" } -attributes #2 = { "EntryPoint" } From b404a07edc7223f352b622fda04d9ebf669f6dce Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 12 Aug 2021 16:55:36 +0200 Subject: [PATCH 083/106] Adding additional optimisation --- src/Passes/Source/Llvm/Llvm.hpp | 2 ++ src/Passes/Source/Profiles/BaseProfile.cpp | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/src/Passes/Source/Llvm/Llvm.hpp b/src/Passes/Source/Llvm/Llvm.hpp index 066d1c4644..e0b19b497f 100644 --- a/src/Passes/Source/Llvm/Llvm.hpp +++ b/src/Passes/Source/Llvm/Llvm.hpp @@ -54,6 +54,8 @@ // Profiles #include "llvm/IR/LegacyPassManager.h" #include "llvm/LinkAllPasses.h" +#include "llvm/Transforms/Scalar/ADCE.h" +#include "llvm/Transforms/Scalar/DCE.h" #if defined(__clang__) #pragma clang diagnostic pop diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index 9e99a2eed2..bbf4307a1e 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -18,6 +18,9 @@ llvm::ModulePassManager BaseProfile::createGenerationModulePass( auto function_pass_manager = pass_builder.buildFunctionSimplificationPipeline( optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); + auto inliner_pass = pass_builder.buildInlinerPipeline( + optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); + // TODO: Maybe this should be done at a module level function_pass_manager.addPass(ExpandStaticAllocationPass()); @@ -38,6 +41,11 @@ llvm::ModulePassManager BaseProfile::createGenerationModulePass( factory.disableStringSupport(); function_pass_manager.addPass(TransformationRulePass(std::move(rule_set))); + + // Eliminate dead code + function_pass_manager.addPass(llvm::DCEPass()); + function_pass_manager.addPass(llvm::ADCEPass()); + // function_pass_manager.addPass(llvm::createCalledValuePropagationPass()); // function_pass_manager.addPass(createSIFoldOperandsPass()); @@ -48,9 +56,21 @@ llvm::ModulePassManager BaseProfile::createGenerationModulePass( // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); 
// InlinerPass() + // auto &cgpm = inliner_pass.getPM(); + // cgpm.addPass(llvm::ADCEPass()); + + // CGPM.addPass(createCGSCCToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(LoopFooPass()))); + // CGPM.addPass(createCGSCCToFunctionPassAdaptor(FunctionFooPass())); + ret.addPass(createModuleToFunctionPassAdaptor(std::move(function_pass_manager))); + // TODO: Not available in 11 ret.addPass(llvm::createModuleToCGSCCPassAdaptor(std::move(CGPM))); + ret.addPass(llvm::AlwaysInlinerPass()); + ret.addPass(std::move(inliner_pass)); + // ret.addPass(); + // CGSCCA pass llvm::InlinerPass() + return ret; } From fbc81cd2782d9b9b9685d483640adaa6809ee49e Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 12 Aug 2021 16:56:19 +0200 Subject: [PATCH 084/106] Updating README --- src/Passes/README.md | 138 +++++++++++++++++-------------------------- 1 file changed, 54 insertions(+), 84 deletions(-) diff --git a/src/Passes/README.md b/src/Passes/README.md index fac6675081..ac8cebb154 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -90,90 +90,60 @@ source_filename = "./analysis-example.ll" define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { entry: - %leftMessage = inttoptr i64 0 to %Qubit* - %rightMessage = inttoptr i64 1 to %Qubit* - call void @__quantum__qis__h(%Qubit* %leftMessage) - call void @__quantum__qis__cnot(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = inttoptr i64 0 to %Qubit* - %1 = inttoptr i64 2 to %Qubit* - call void @__quantum__qis__h(%Qubit* %0) - call void @__quantum__qis__cnot(%Qubit* %0, %Qubit* %1) - %2 = inttoptr i64 1 to %Qubit* - %3 = inttoptr i64 3 to %Qubit* - call void @__quantum__qis__h(%Qubit* %2) - call void @__quantum__qis__cnot(%Qubit* %2, %Qubit* %3) - %4 = inttoptr i64 0 to %Qubit* - %5 = inttoptr i64 2 to %Qubit* - call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) - %6 = inttoptr 
i64 2 to %Qubit* - %7 = inttoptr i64 1 to %Qubit* - %8 = inttoptr i64 3 to %Qubit* - call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2(%Qubit* %6, %Qubit* %7, %Qubit* %8) - %result.i = inttoptr i64 0 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %leftMessage, %Result* %result.i) - call void @__quantum__qis__reset__body(%Qubit* %leftMessage) - %9 = inttoptr i64 3 to %Qubit* - %result.i1 = inttoptr i64 1 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %9, %Result* %result.i1) - call void @__quantum__qis__reset__body(%Qubit* %9) - ret void -} - -define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - call void @__quantum__qis__cnot(%Qubit* %src, %Qubit* %intermediary) - call void @__quantum__qis__h(%Qubit* %src) - %result.i.i = inttoptr i64 4 to %Result* - %0 = call i1 @__quantum__qir__read_result(%Result* %result.i.i) - call void @__quantum__qis__mz__body(%Qubit* %src, %Result* %result.i.i) - call void @__quantum__qis__reset__body(%Qubit* %src) - br i1 %0, label %then0__1.i, label %continue__1.i - -then0__1.i: ; preds = %entry - call void @__quantum__qis__z(%Qubit* %dest) - br label %continue__1.i - -continue__1.i: ; preds = %then0__1.i, %entry - %result.i1.i = inttoptr i64 5 to %Result* - %1 = call i1 @__quantum__qir__read_result(%Result* %result.i1.i) - call void @__quantum__qis__mz__body(%Qubit* %intermediary, %Result* %result.i1.i) - call void @__quantum__qis__reset__body(%Qubit* %intermediary) - br i1 %1, label %then0__2.i, label %TeleportChain__ApplyCorrection__body.exit - -then0__2.i: ; preds = %continue__1.i - call void @__quantum__qis__x(%Qubit* %dest) - br label %TeleportChain__ApplyCorrection__body.exit - -TeleportChain__ApplyCorrection__body.exit: ; preds = %then0__2.i, %continue__1.i - ret void -} - -define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2(%Qubit* 
%src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - call void @__quantum__qis__cnot(%Qubit* %src, %Qubit* %intermediary) - call void @__quantum__qis__h(%Qubit* %src) - %result.i.i = inttoptr i64 6 to %Result* - %0 = call i1 @__quantum__qir__read_result(%Result* %result.i.i) - call void @__quantum__qis__mz__body(%Qubit* %src, %Result* %result.i.i) - call void @__quantum__qis__reset__body(%Qubit* %src) - br i1 %0, label %then0__1.i, label %continue__1.i - -then0__1.i: ; preds = %entry - call void @__quantum__qis__z(%Qubit* %dest) - br label %continue__1.i - -continue__1.i: ; preds = %then0__1.i, %entry - %result.i1.i = inttoptr i64 7 to %Result* - %1 = call i1 @__quantum__qir__read_result(%Result* %result.i1.i) - call void @__quantum__qis__mz__body(%Qubit* %intermediary, %Result* %result.i1.i) - call void @__quantum__qis__reset__body(%Qubit* %intermediary) - br i1 %1, label %then0__2.i, label %TeleportChain__ApplyCorrection__body.exit - -then0__2.i: ; preds = %continue__1.i - call void @__quantum__qis__x(%Qubit* %dest) - br label %TeleportChain__ApplyCorrection__body.exit - -TeleportChain__ApplyCorrection__body.exit: ; preds = %then0__2.i, %continue__1.i + call void @__quantum__qis__h(%Qubit* null) + call void @__quantum__qis__cnot(%Qubit* null, %Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__h(%Qubit* null) + call void @__quantum__qis__cnot(%Qubit* null, %Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + call void @__quantum__qis__h(%Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__cnot(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + call void @__quantum__qis__cnot(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Qubit* null) + call void @__quantum__qis__h(%Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + %0 = call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 4 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 1 to 
%Qubit*), %Result* nonnull inttoptr (i64 4 to %Result*)) + call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + br i1 %0, label %then0__1.i.i, label %continue__1.i.i + +then0__1.i.i: ; preds = %entry + call void @__quantum__qis__z(%Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + br label %continue__1.i.i + +continue__1.i.i: ; preds = %then0__1.i.i, %entry + %1 = call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 5 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* null, %Result* nonnull inttoptr (i64 5 to %Result*)) + call void @__quantum__qis__reset__body(%Qubit* null) + br i1 %1, label %then0__2.i.i, label %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1.exit + +then0__2.i.i: ; preds = %continue__1.i.i + call void @__quantum__qis__x(%Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + br label %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1.exit + +TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1.exit: ; preds = %continue__1.i.i, %then0__2.i.i + call void @__quantum__qis__cnot(%Qubit* nonnull inttoptr (i64 2 to %Qubit*), %Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__h(%Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + %2 = call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 6 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*), %Result* nonnull inttoptr (i64 6 to %Result*)) + call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + br i1 %2, label %then0__1.i.i2, label %continue__1.i.i3 + +then0__1.i.i2: ; preds = %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1.exit + call void @__quantum__qis__z(%Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + br label %continue__1.i.i3 + +continue__1.i.i3: ; preds = %then0__1.i.i2, %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.1.exit + %3 = call i1 @__quantum__qir__read_result(%Result* nonnull 
inttoptr (i64 7 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Result* nonnull inttoptr (i64 7 to %Result*)) + call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + br i1 %3, label %then0__2.i.i4, label %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit + +then0__2.i.i4: ; preds = %continue__1.i.i3 + call void @__quantum__qis__x(%Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + br label %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit + +TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit: ; preds = %continue__1.i.i3, %then0__2.i.i4 + call void @__quantum__qis__mz__body(%Qubit* null, %Result* null) + call void @__quantum__qis__reset__body(%Qubit* null) + call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*), %Result* nonnull inttoptr (i64 1 to %Result*)) + call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*)) ret void } From 7c73314cce2d8aec909bfb0a405a61690b216b44 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 12 Aug 2021 17:52:17 +0200 Subject: [PATCH 085/106] Correcting mistake --- .../Source/AllocationManager/AllocationManager.cpp | 8 +++----- .../ConstSizeArray/ConstSizeArray.qs | 13 ------------- 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/src/Passes/Source/AllocationManager/AllocationManager.cpp b/src/Passes/Source/AllocationManager/AllocationManager.cpp index d6019ed6cf..e2c2178aa5 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.cpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.cpp @@ -21,6 +21,7 @@ AllocationManager::AllocationManagerPtr AllocationManager::createNew() AllocationManager::Index AllocationManager::allocate() { auto ret = start_; + llvm::errs() << "ALLOCATING AT " << ret << "\n"; ++start_; return ret; } @@ -52,11 +53,8 @@ void AllocationManager::allocate(String const &name, Index const &size, bool val } name_to_index_[map.name] = map.index; - if (!mappings_.empty()) - { - map.start = start_; - start_ += size; - } + map.start = start_; + start_ += size; map.end = map.start + size; mappings_.emplace_back(std::move(map)); diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index f0bc733273..8a531d44d7 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -20,19 +20,6 @@ namespace TeleportChain { ApplyCorrection(src, intermediary, dest); } - operation TeleportQubit(src : Qubit, dest : Qubit) : Unit { - use intermediary = Qubit(); - PrepareEntangledPair(intermediary, dest); - TeleportQubitUsingPresharedEntanglement(src, intermediary, dest); - } - - operation DemonstrateEntanglementSwapping() : (Result, Result) { - use (reference, src, intermediary, dest) = (Qubit(), Qubit(), Qubit(), Qubit()); - PrepareEntangledPair(reference, src); - 
TeleportQubit(src, dest); - return (MResetZ(reference), MResetZ(dest)); - } - @EntryPoint() operation DemonstrateTeleportationUsingPresharedEntanglement() : Unit { let nPairs = 2; From 9f5f5c2acf6874c703c16d8d1399fbaf5dcc5f90 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 12 Aug 2021 17:52:54 +0200 Subject: [PATCH 086/106] Removing debug output --- src/Passes/Source/AllocationManager/AllocationManager.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Passes/Source/AllocationManager/AllocationManager.cpp b/src/Passes/Source/AllocationManager/AllocationManager.cpp index e2c2178aa5..e09fb0bfed 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.cpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.cpp @@ -21,7 +21,6 @@ AllocationManager::AllocationManagerPtr AllocationManager::createNew() AllocationManager::Index AllocationManager::allocate() { auto ret = start_; - llvm::errs() << "ALLOCATING AT " << ret << "\n"; ++start_; return ret; } From a7467bf95226485f6be21e07ce998f461d8422d0 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 12 Aug 2021 21:11:58 +0200 Subject: [PATCH 087/106] Fixing bug --- src/Passes/Source/Profiles/BaseProfile.cpp | 2 +- src/Passes/Source/Rules/Factory.cpp | 14 ++++--- .../ConstSizeArray/ConstSizeArray.qs | 5 +-- .../examples/QubitAllocationAnalysis/Makefile | 1 + .../analysis-example.ll | 41 +++++++++++-------- 5 files changed, 35 insertions(+), 28 deletions(-) diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index bbf4307a1e..dadcbace8a 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -13,7 +13,7 @@ llvm::ModulePassManager BaseProfile::createGenerationModulePass( llvm::PassBuilder &pass_builder, llvm::PassBuilder::OptimizationLevel &optimisation_level, bool debug) { - auto ret = pass_builder.buildPerModuleDefaultPipeline(llvm::PassBuilder::OptimizationLevel::O1); + auto ret = pass_builder.buildPerModuleDefaultPipeline(optimisation_level); // buildPerModuleDefaultPipeline buildModuleOptimizationPipeline auto function_pass_manager = pass_builder.buildFunctionSimplificationPipeline( optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp index dd06ab29e5..ce789195e5 100644 --- a/src/Passes/Source/Rules/Factory.cpp +++ b/src/Passes/Source/Rules/Factory.cpp @@ -53,6 +53,7 @@ void RuleFactory::useStaticQuantumArrayAllocation() replacements.push_back({llvm::dyn_cast(val), nullptr}); return true; }; + addRule({Call("__quantum__rt__qubit_allocate_array", "size"_cap = _), allocation_replacer}); /// Array access replacement @@ -253,9 +254,8 @@ void RuleFactory::optimiseBranchQuatumOne() function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, "__quantum__qir__read_result", module); } - auto result_inst = llvm::dyn_cast(result); - builder.SetInsertPoint(result_inst->getNextNode()); + builder.SetInsertPoint(llvm::dyn_cast(val)); 
auto new_call = builder.CreateCall(function, arguments); new_call->takeName(cond); @@ -273,6 +273,12 @@ void RuleFactory::optimiseBranchQuatumOne() return false; }; + /* + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + br i1 %2, label %then0__1, label %continue__1 + */ + // Variations of get_one addRule({Branch("cond"_cap = Call("__quantum__rt__result_equal", "result"_cap = _, "one"_cap = get_one), @@ -290,10 +296,6 @@ void RuleFactory::disableReferenceCounting() removeFunctionCall("__quantum__rt__array_update_reference_count"); removeFunctionCall("__quantum__rt__string_update_reference_count"); removeFunctionCall("__quantum__rt__result_update_reference_count"); - - removeFunctionCall("__quantum__rt__string_create"); - removeFunctionCall("__quantum__rt__string_release"); - removeFunctionCall("__quantum__rt__message"); } void RuleFactory::disableAliasCounting() diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index 8a531d44d7..257b84fbd7 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -21,7 +21,7 @@ namespace TeleportChain { } @EntryPoint() - operation DemonstrateTeleportationUsingPresharedEntanglement() : Unit { + operation DemonstrateTeleportationUsingPresharedEntanglement() : Result { let nPairs = 2; use (leftMessage, rightMessage, leftPreshared, rightPreshared) = (Qubit(), Qubit(), Qubit[nPairs], Qubit[nPairs]); PrepareEntangledPair(leftMessage, rightMessage); @@ -35,7 +35,6 @@ namespace TeleportChain { } let _ = MResetZ(leftMessage); - let _ = MResetZ(rightPreshared[nPairs-1]); - // return (); + return MResetZ(rightPreshared[nPairs-1]); } } \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile 
b/src/Passes/examples/QubitAllocationAnalysis/Makefile index f9937dca44..8a5e91d8d7 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -4,6 +4,7 @@ run-expand: build-qaa build-esa analysis-example.ll run: build-qaa analysis-example.ll + # TODO(tfr): Add comments opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll run-replace: build-ir build-qaa build-esa analysis-example.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index 01079efdd2..dd27b7facc 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -6,8 +6,6 @@ source_filename = "qir/ConstSizeArray.ll" %Array = type opaque %String = type opaque -@0 = internal constant [3 x i8] c"()\00" - define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) @@ -60,7 +58,7 @@ entry: ret void } -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +define internal fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { entry: %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() @@ -108,12 +106,11 @@ entry: call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) call void 
@__quantum__rt__qubit_release(%Qubit* %leftMessage) call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret void + ret %Result* %31 } declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr @@ -173,6 +170,10 @@ entry: ret void } +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr + declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr @@ -181,29 +182,33 @@ declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr -declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr - -declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +define i8 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - ret void + %0 = call fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = call %Result* @__quantum__rt__result_get_zero() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + %not. = xor i1 %2, true + %3 = sext i1 %not. 
to i8 + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + ret i8 %3 } +declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr + define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + %0 = call fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = call %String* @__quantum__rt__result_to_string(%Result* %0) + call void @__quantum__rt__message(%String* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) ret void } declare void @__quantum__rt__message(%String*) local_unnamed_addr +declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr + declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr attributes #0 = { "InteropFriendly" } From d79d0d71f20e66139346d7ab3fb7dda3f7b835bb Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 13 Aug 2021 13:46:17 +0200 Subject: [PATCH 088/106] Documentation and refactoring --- .../AllocationManager/AllocationManager.hpp | 60 ++- src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp | 29 ++ src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp | 42 ++ src/Passes/Source/Apps/Qat/Qat.cpp | 113 +++--- .../Source/Commandline/ParameterParser.cpp | 3 + .../Source/Commandline/ParameterParser.hpp | 58 ++- .../LibTransformationRule.cpp | 4 +- src/Passes/Source/Profiles/BaseProfile.cpp | 4 +- src/Passes/Source/Rules/Factory.cpp | 4 +- src/Passes/Source/Rules/Factory.hpp | 36 +- src/Passes/Source/Rules/OperandPrototype.cpp | 10 + src/Passes/Source/Rules/OperandPrototype.hpp | 50 ++- src/Passes/Source/Rules/ReplacementRule.hpp | 28 ++ src/Passes/Source/Rules/RuleSet.cpp | 3 + src/Passes/Source/Rules/RuleSet.hpp | 19 +- .../examples/QubitAllocationAnalysis/Makefile | 6 +- .../analysis-example.ll | 43 +- .../examples/QubitAllocationAnalysis/test.ll | 0 .../examples/QubitAllocationAnalysis/test1.ll | 378 ------------------ .../examples/QubitAllocationAnalysis/test2.ll | 288 ------------- 20 files changed, 375 insertions(+), 803 deletions(-) create mode 100644 src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp create mode 100644 src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/test.ll delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/test1.ll delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/test2.ll diff --git a/src/Passes/Source/AllocationManager/AllocationManager.hpp b/src/Passes/Source/AllocationManager/AllocationManager.hpp index d4b1df39f4..909736614f 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.hpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.hpp @@ -14,39 +14,77 @@ namespace quantum { class AllocationManager { public: - using Index = uint64_t; - using String = std::string; - using AllocationManagerPtr = std::shared_ptr; - using Resource = 
std::vector; - using Resources = std::unordered_map; - + /// Defines a named register/memory segment with start + /// position, end position and size. struct MemoryMapping { - String name{""}; + using Index = uint64_t; + using String = std::string; + + String name{""}; ///< Name of the segment, if any given Index index{0}; ///< Index of the allocation Index size{0}; ///< Size of memory segment Index start{0}; ///< Start index of memory segment Index end{0}; ///< Index not included in memory segment }; - using NameToIndex = std::unordered_map; - using Mappings = std::vector; + using Index = uint64_t; + using String = std::string; + using AllocationManagerPtr = std::shared_ptr; + using Resource = std::vector; + using Resources = std::unordered_map; + using NameToIndex = std::unordered_map; + using Mappings = std::vector; + + /// Pointer contstruction + /// @{ + /// Creates a new allocation manager. The manager is kept + /// as a shared pointer to enable allocation accross diffent + /// passes and/or replacement rules. static AllocationManagerPtr createNew(); + /// @} + /// Allocation and release functions + /// @{ + /// Allocates a single address. Index allocate(); - void allocate(String const &name, Index const &size, bool value_only = false); + + /// Allocates a name segment of a given size. + void allocate(String const &name, Index const &size, bool value_only = false); + + /// Gets the offset of a name segment or address. Index getOffset(String const &name) const; - void release(String const &name); + /// Releases the named segment or address. + void release(String const &name); + + /// Retrieves a named resource. Resource &get(String const &name); + /// @} private: + /// Private constructors + /// @{ + /// Public construction of this object is only allowed + /// as a shared pointer. To create a new AllocationManager, + /// use AllocationManager::createNew(). 
AllocationManager() = default; + /// @} + /// Memory mapping + /// @{ + /// Each allocation has a register/memory mapping which + /// keeps track of the NameToIndex name_to_index_; Mappings mappings_; + /// @} + /// Compile-time resources + /// @{ + /// Compile-time allocated resources such as + /// arrays who Resources resources_; + /// @} Index start_{0}; }; diff --git a/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp b/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp new file mode 100644 index 0000000000..e4fb7ae980 --- /dev/null +++ b/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp @@ -0,0 +1,29 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Apps/Qat/LlvmAnalysis.hpp" + +#include "Llvm/Llvm.hpp" + +namespace microsoft { +namespace quantum { + +LlvmAnalyser::LlvmAnalyser(bool debug) + : loop_analysis_manager{debug} + , function_analysis_manager{debug} + , cGSCCAnalysisManager{debug} + , module_analysis_manager{debug} + , +{ + pass_builder.registerModuleAnalyses(module_analysis_manager); + pass_builder.registerCGSCCAnalyses(gscc_analysis_manager); + pass_builder.registerFunctionAnalyses(function_analysis_manager); + pass_builder.registerLoopAnalyses(loop_analysis_manager); + + pass_builder.crossRegisterProxies(loop_analysis_manager, function_analysis_manager, + gscc_analysis_manager, module_analysis_manager); +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp b/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp new file mode 100644 index 0000000000..eaa2666ca4 --- /dev/null +++ b/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp @@ -0,0 +1,42 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm/Llvm.hpp" + +namespace microsoft { +namespace quantum { + +struct LlvmAnalyser +{ + /// Constructors + /// @{ + explicit LlvmAnalyser(bool debug); + + // Default construction not allowed as this leads + // to invalid configuration of the managers. + LlvmAnalyser() = delete; + + // Copy construction prohibited due to restrictions + // on the member variables. + LlvmAnalyser(LlvmAnalyser const &) = delete; + + // Prefer move construction at all times. + LlvmAnalyser(LlvmAnalyser &&) = default; + + // Default deconstruction. + ~LlvmAnalyser() = default; + /// @} + + /// Objects used to run a set of passes + /// @{ + llvm::PassBuilder pass_builder; + llvm::LoopAnalysisManager loop_analysis_manager; + llvm::FunctionAnalysisManager function_analysis_manager; + llvm::CGSCCAnalysisManager gscc_analysis_manager; + llvm::ModuleAnalysisManager module_analysis_manager; + /// @} +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index 3ce588330c..994fff212b 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -1,3 +1,7 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Apps/Qat/LlvmAnalysis.hpp" #include "Commandline/ParameterParser.hpp" #include "Commandline/Settings.hpp" #include "Llvm/Llvm.hpp" @@ -14,17 +18,17 @@ using namespace microsoft::quantum; int main(int argc, char **argv) { // Parsing commmandline arguments - Settings settings{{ - {"debug", "false"}, - {"generate", "false"}, - {"validate", "false"}, - {"profile", "base-profile"}, - }}; + Settings settings{{{"debug", "false"}, + {"generate", "false"}, + {"validate", "false"}, + {"profile", "base-profile"}, + {"S", "false"}}}; ParameterParser parser(settings); parser.addFlag("debug"); parser.addFlag("generate"); parser.addFlag("validate"); + parser.addFlag("S"); parser.parseArgs(argc, argv); @@ -45,72 +49,57 @@ int main(int argc, char **argv) exit(-1); } - // settings.print(); - - // Generating IR - bool debug = settings.get("debug") == "true"; - bool generate = settings.get("generate") == "true"; - bool validate = settings.get("validate") == "true"; - auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; - BaseProfile profile; + // Extracting commandline parameters + bool debug = settings.get("debug") == "true"; + bool generate = settings.get("generate") == "true"; + bool validate = settings.get("validate") == "true"; + auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; + std::shared_ptr profile = std::make_shared(); - // Worth looking at: - // https://opensource.apple.com/source/lldb/lldb-76/llvm/tools/opt/opt.cpp + // In case we debug, we also print the settings to allow provide a full + // picture of what is going. + if (debug) + { + settings.print(); + } + // Checking if we are asked to generate a new QIR. 
If so, we will use + // the profile to setup passes to if (generate) { // Creating pass builder - llvm::PassBuilder pass_builder; - llvm::LoopAnalysisManager loopAnalysisManager(debug); - llvm::FunctionAnalysisManager functionAnalysisManager(debug); - llvm::CGSCCAnalysisManager cGSCCAnalysisManager(debug); - llvm::ModuleAnalysisManager moduleAnalysisManager(debug); - - pass_builder.registerModuleAnalyses(moduleAnalysisManager); - pass_builder.registerCGSCCAnalyses(cGSCCAnalysisManager); - pass_builder.registerFunctionAnalyses(functionAnalysisManager); - pass_builder.registerLoopAnalyses(loopAnalysisManager); - - pass_builder.crossRegisterProxies(loopAnalysisManager, functionAnalysisManager, - cGSCCAnalysisManager, moduleAnalysisManager); - - profile.addFunctionAnalyses(functionAnalysisManager); - auto modulePassManager = - profile.createGenerationModulePass(pass_builder, optimisation_level, debug); - - modulePassManager.run(*module, moduleAnalysisManager); - - // - - llvm::legacy::PassManager legacy_pass_manager; - legacy_pass_manager.add(llvm::createCalledValuePropagationPass()); - legacy_pass_manager.add(llvm::createCalledValuePropagationPass()); - legacy_pass_manager.add(llvm::createConstantMergePass()); - legacy_pass_manager.run(*module); - - llvm::errs() << *module << "\n"; + LlvmAnalyser analyser{debug}; + + // Preparing pass for generation based on profile + profile->addFunctionAnalyses(analyser.function_analysis_manager); + auto module_pass_manager = + profile->createGenerationModulePass(analyser.pass_builder, optimisation_level, debug); + + // Running the pass built by the profile + module_pass_manager.run(*module, analyser.module_analysis_manager); + + // Priniting either human readible LL code or byte + // code as a result, depending on the users preference. + if (settings.get("S") == "true") + { + llvm::errs() << *module << "\n"; + } + else + { + llvm::errs() + << "Byte code ouput is not supported yet. 
Please add -S to get human readible LL code.\n"; + } } if (validate) { // Creating pass builder - llvm::PassBuilder pass_builder; - llvm::LoopAnalysisManager loopAnalysisManager(debug); - llvm::FunctionAnalysisManager functionAnalysisManager(debug); - llvm::CGSCCAnalysisManager cGSCCAnalysisManager(debug); - llvm::ModuleAnalysisManager moduleAnalysisManager(debug); - - pass_builder.registerModuleAnalyses(moduleAnalysisManager); - pass_builder.registerCGSCCAnalyses(cGSCCAnalysisManager); - pass_builder.registerFunctionAnalyses(functionAnalysisManager); - pass_builder.registerLoopAnalyses(loopAnalysisManager); - - pass_builder.crossRegisterProxies(loopAnalysisManager, functionAnalysisManager, - cGSCCAnalysisManager, moduleAnalysisManager); - - auto modulePassManager = - profile.createValidationModulePass(pass_builder, optimisation_level, debug); - modulePassManager.run(*module, moduleAnalysisManager); + LlvmAnalyser analyser{debug}; + + // Creating a validation pass manager + auto module_pass_manager = + profile->createValidationModulePass(analyser.pass_builder, optimisation_level, debug); + module_pass_manager.run(*module, analyser.module_analysis_manager); } return 0; diff --git a/src/Passes/Source/Commandline/ParameterParser.cpp b/src/Passes/Source/Commandline/ParameterParser.cpp index d559313baa..b8cd011f86 100644 --- a/src/Passes/Source/Commandline/ParameterParser.cpp +++ b/src/Passes/Source/Commandline/ParameterParser.cpp @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + #include "Commandline/ParameterParser.hpp" #include diff --git a/src/Passes/Source/Commandline/ParameterParser.hpp b/src/Passes/Source/Commandline/ParameterParser.hpp index c4d78e4d34..269b5e29a3 100644 --- a/src/Passes/Source/Commandline/ParameterParser.hpp +++ b/src/Passes/Source/Commandline/ParameterParser.hpp @@ -1,4 +1,7 @@ #pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ #include "Commandline/Settings.hpp" #include @@ -16,27 +19,60 @@ class ParameterParser using Arguments = std::vector; using Flags = std::unordered_set; + /// Construction and deconstrution configuration + /// @{ + /// Parameter parsers requires a setting class to store + /// parameters passed. + explicit ParameterParser(Settings &settings); + + // Allow move semantics only. No default construction + ParameterParser() = delete; + ParameterParser(ParameterParser const &other) = delete; + ParameterParser(ParameterParser &&other) = default; + ~ParameterParser() = default; + /// @} + + /// Configuration + /// @{ + + /// Marks a name as a flag (as opposed to an option). + /// This ensures that no parameter is expected after + /// the flag is specified. For instance `--debug` is + /// a flag as opposed to `--log-level 3` which is an + /// option. + void addFlag(String const &v); + /// @} + + /// Operation + /// @{ + /// Parses the command line arguments given the argc and argv + /// from the main function. + void parseArgs(int argc, char **argv); + + /// Returns list of arguments without flags and/or options + /// included. 
+ Arguments const &arguments() const; + String const & getArg(uint64_t const &n); + /// @} +private: struct ParsedValue { bool is_key{false}; String value; }; - ParameterParser(Settings &settings); - - void parseArgs(int argc, char **argv); - void addFlag(String const &v); - Arguments const &arguments() const; - String const & getArg(uint64_t const &n); - -private: + /// Helper functions and variables + /// @{ ParsedValue parseSingleArg(String key); + bool hasValue(String const &key); + Flags flags_{}; + /// @} - bool hasValue(String const &key); - + /// Storage of parsed data + /// @{ Settings &settings_; Arguments arguments_{}; - Flags flags_{}; + /// @} }; } // namespace quantum diff --git a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp index 31a53ccfca..f1e9cf2ddc 100644 --- a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp @@ -27,8 +27,8 @@ llvm::PassPluginLibraryInfo getTransformationRulePluginInfo() // Defining the mapping auto factory = RuleFactory(rule_set); - factory.useStaticQuantumArrayAllocation(); - factory.useStaticQuantumAllocation(); + factory.useStaticQubitArrayAllocation(); + factory.useStaticQubitAllocation(); factory.useStaticResultAllocation(); factory.optimiseBranchQuatumOne(); diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index dadcbace8a..cfb144586d 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -29,8 +29,8 @@ llvm::ModulePassManager BaseProfile::createGenerationModulePass( // Defining the mapping auto factory = RuleFactory(rule_set); - factory.useStaticQuantumArrayAllocation(); - factory.useStaticQuantumAllocation(); + factory.useStaticQubitArrayAllocation(); + factory.useStaticQubitAllocation(); factory.useStaticResultAllocation(); 
factory.optimiseBranchQuatumOne(); diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp index ce789195e5..55ba1fa2d9 100644 --- a/src/Passes/Source/Rules/Factory.cpp +++ b/src/Passes/Source/Rules/Factory.cpp @@ -32,7 +32,7 @@ void RuleFactory::removeFunctionCall(String const &name) addRule(std::move(ret)); } -void RuleFactory::useStaticQuantumArrayAllocation() +void RuleFactory::useStaticQubitArrayAllocation() { // TODO(tfr): Consider using weak pointers auto qubit_alloc_manager = qubit_alloc_manager_; @@ -121,7 +121,7 @@ void RuleFactory::useStaticQuantumArrayAllocation() }); } -void RuleFactory::useStaticQuantumAllocation() +void RuleFactory::useStaticQubitAllocation() { auto qubit_alloc_manager = qubit_alloc_manager_; auto allocation_replacer = [qubit_alloc_manager](Builder &builder, Value *val, Captures &, diff --git a/src/Passes/Source/Rules/Factory.hpp b/src/Passes/Source/Rules/Factory.hpp index 3dc9ff7563..00509b6148 100644 --- a/src/Passes/Source/Rules/Factory.hpp +++ b/src/Passes/Source/Rules/Factory.hpp @@ -12,7 +12,9 @@ namespace microsoft { namespace quantum { -struct RuleFactory +/// Rule factory provides a high-level methods to build a ruleset that +/// enforces certain aspects of QIR transformation. +class RuleFactory { public: using String = std::string; @@ -24,15 +26,29 @@ struct RuleFactory using Value = llvm::Value; using Builder = ReplacementRule::Builder; - RuleFactory(RuleSet &rule_set); + /// Constructor configuration. Explicit construction with + /// rule set to be configured, which can be moved using move + /// semantics. No copy allowed. + /// @{ + explicit RuleFactory(RuleSet &rule_set); + RuleFactory() = delete; + RuleFactory(RuleFactory const &) = delete; + RuleFactory(RuleFactory &&) = default; + ~RuleFactory() = default; + /// @} /// Generic rules + /// @{ + /// Removes all calls to functions with a specified name. + /// This function matches on name alone and ignores function + /// arguments. 
void removeFunctionCall(String const &name); + /// @} /// Conventions /// @{ - void useStaticQuantumArrayAllocation(); - void useStaticQuantumAllocation(); + void useStaticQubitArrayAllocation(); + void useStaticQubitAllocation(); void useStaticResultAllocation(); /// @} @@ -47,18 +63,22 @@ struct RuleFactory void disableReferenceCounting(); void disableAliasCounting(); void disableStringSupport(); - // TODO: void disableDynamicQuantumAllocation(); /// @} + /// Allocation Managers + /// @{ AllocationManagerPtr qubitAllocationManager() const; AllocationManagerPtr resultAllocationManager() const; - + /// @} private: ReplacementRulePtr addRule(ReplacementRule &&rule); - RuleSet &rule_set_; + /// Affected artefacts + /// @{ + RuleSet &rule_set_; ///< The ruleset we are building + /// @} - /// Allocation managers + /// Allocation managers. Allocation managers for different types /// @{ AllocationManagerPtr qubit_alloc_manager_{nullptr}; AllocationManagerPtr result_alloc_manager_{nullptr}; diff --git a/src/Passes/Source/Rules/OperandPrototype.cpp b/src/Passes/Source/Rules/OperandPrototype.cpp index 4ce85cc060..9ed3e3c3be 100644 --- a/src/Passes/Source/Rules/OperandPrototype.cpp +++ b/src/Passes/Source/Rules/OperandPrototype.cpp @@ -44,6 +44,16 @@ bool OperandPrototype::matchChildren(Value *value, Captures &captures) const return true; } +void OperandPrototype::addChild(Child const &child) +{ + children_.push_back(child); +} + +void OperandPrototype::enableCapture(std::string capture_name) +{ + capture_name_ = capture_name; +} + bool OperandPrototype::fail(Value * /*value*/, Captures & /*captures*/) const { return false; diff --git a/src/Passes/Source/Rules/OperandPrototype.hpp b/src/Passes/Source/Rules/OperandPrototype.hpp index e437db6b2f..c365ce4e6d 100644 --- a/src/Passes/Source/Rules/OperandPrototype.hpp +++ b/src/Passes/Source/Rules/OperandPrototype.hpp @@ -10,6 +10,8 @@ namespace microsoft { namespace quantum { +/// OperandPrototype describes an IR pattern and 
allows matching against +/// LLVMs llvm::Value type. class OperandPrototype { public: @@ -20,39 +22,63 @@ class OperandPrototype using Children = std::vector; using Captures = std::unordered_map; + /// Constructors and desctructors + /// @{ OperandPrototype() = default; - virtual ~OperandPrototype(); + /// @} + + /// Interface functions + /// @{ virtual bool match(Value *value, Captures &captures) const = 0; virtual Child copy() const = 0; + /// @} - void addChild(Child const &child) - { - children_.push_back(child); - } + /// Shared functionality + /// @{ - void enableCapture(std::string capture_name) - { - capture_name_ = capture_name; - } + /// Adds a child to be matched against the matchees children. Children + /// are matched in order and by size. + void addChild(Child const &child); + /// Flags that this operand should be captured. This function ensures + /// that the captured operand is given a name. The subsequent logic + /// in this class is responsible for capturing (upon match) and + /// uncapturing (upon backtrack) with specified name + void enableCapture(std::string capture_name); + /// @} protected: + /// Function to indicate match success or failure. Either of these + /// must be called prior to return from an implementation of + /// OperandPrototype::match. + /// @{ bool fail(Value *value, Captures &captures) const; bool success(Value *value, Captures &captures) const; + /// @} + /// Helper functions for the capture logic. 
+ /// @{ bool matchChildren(Value *value, Captures &captures) const; void capture(Value *value, Captures &captures) const; void uncapture(Value *value, Captures &captures) const; + /// @} + /// Helper functions for operation + /// @{ + /// Shallow copy of the operand to allow name change + /// of the capture void copyPropertiesFrom(OperandPrototype const &other) { capture_name_ = other.capture_name_; children_ = other.children_; } - + /// @} private: - std::string capture_name_{""}; - Children children_{}; + /// Data variables for common matching functionality + /// @{ + std::string capture_name_{""}; ///< Name to captured value. Empty means no capture. + Children children_{}; ///< Children to match aginst the values children. + /// @} }; class AnyPattern : public OperandPrototype diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index 5840a9e476..ca5af9052d 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -10,6 +10,11 @@ namespace microsoft { namespace quantum { + +/// Rule that describes a pattern and how to make a replacement of the matched values. +/// The class contians a OprandPrototype which is used to test whether an LLVM IR value +/// follows a specific pattern. The class also holds a function pointer to logic that +/// allows replacement of the specified value. class ReplacementRule { public: @@ -29,13 +34,24 @@ class ReplacementRule /// Rule configuration /// @{ + + /// Sets the pattern describing logic to be replaced. void setPattern(OperandPrototypePtr &&pattern); + + /// Sets the replacer logic which given a successful match will perform + /// a replacement on the IR. void setReplacer(ReplaceFunction const &replacer); /// @} /// Operation /// @{ + /// Tests whether a given value matches the rule pattern and store captures. + /// The function returns true if the match was successful in which case captures + /// are recorded. 
bool match(Value *value, Captures &captures) const; + + /// Invokes the replacer given a matched value and its corresponding captures + // bool replace(Builder &builder, Value *value, Captures &captures, Replacements &replacements) const; /// @} @@ -46,6 +62,18 @@ class ReplacementRule namespace patterns { using OperandPrototypePtr = std::shared_ptr; + +/// @{ +template +inline OperandPrototypePtr Call(std::string const &name, Args... args); +inline OperandPrototypePtr CallByNameOnly(std::string const &name); +inline OperandPrototypePtr BitCast(OperandPrototypePtr arg); +inline OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, + OperandPrototypePtr arg2); +inline OperandPrototypePtr Load(OperandPrototypePtr arg); +inline OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value); +/// @} + template inline OperandPrototypePtr Call(std::string const &name, Args... args) { diff --git a/src/Passes/Source/Rules/RuleSet.cpp b/src/Passes/Source/Rules/RuleSet.cpp index 715bc6c5d0..e592645c7d 100644 --- a/src/Passes/Source/Rules/RuleSet.cpp +++ b/src/Passes/Source/Rules/RuleSet.cpp @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + #include "Rules/RuleSet.hpp" #include "AllocationManager/AllocationManager.hpp" diff --git a/src/Passes/Source/Rules/RuleSet.hpp b/src/Passes/Source/Rules/RuleSet.hpp index a7d0a8c11d..e6226874bc 100644 --- a/src/Passes/Source/Rules/RuleSet.hpp +++ b/src/Passes/Source/Rules/RuleSet.hpp @@ -1,4 +1,6 @@ #pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. #include "AllocationManager/AllocationManager.hpp" #include "Llvm/Llvm.hpp" @@ -11,6 +13,10 @@ namespace microsoft { namespace quantum { +/// RuleSet contains a set of replacement rules and the corresponding logic +/// to apply the rules. 
The class allows one to apply the rules by which +/// each rule is tested one-by-one until a successful attempt at performing +/// a replace has happened, or the list was exhausted. class RuleSet { public: @@ -23,6 +29,7 @@ class RuleSet using Builder = ReplacementRule::Builder; using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; + /// Constructors /// @{ RuleSet() = default; RuleSet(RuleSet const &) = default; @@ -37,10 +44,20 @@ class RuleSet // TODO(tfr): add RuleSet operator&(RuleSet const &other); /// @} + /// Operating rule sets + /// @{ + /// Matches patterns and runs the replacement routines if a match + /// is found. The function returns true if a pattern is matched and + /// and the replacement was a success. In all other cases, it returns + /// false. bool matchAndReplace(Instruction *value, Replacements &replacements); + /// @} + /// Set up and configuration + /// @{ + /// Adds a new replacement rule to the set. void addRule(ReplacementRulePtr const &rule); - + /// @} private: Rules rules_; ///< Rules that describes QIR mappings }; diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index 8a5e91d8d7..0318168070 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -10,8 +10,8 @@ run: build-qaa analysis-example.ll run-replace: build-ir build-qaa build-esa analysis-example.ll # opt -loop-unroll -unroll-count=3 -unroll-allow-partial opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ - -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll > test1.ll - opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="loop-simplify,loop-unroll,restrict-qir" -S test1.ll > test2.ll + -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib 
--passes="expand-static-allocation" -S analysis-example.ll > analysis-example-step1.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="loop-simplify,loop-unroll,restrict-qir" -S analysis-example-step1.ll > analysis-example-final.ll opt --passes="inline" -S test2.ll | opt -O1 -S @@ -34,3 +34,5 @@ analysis-example.ll: clean: cd ConstSizeArray && make clean rm analysis-example.ll + rm analysis-example-step1.ll + rm analysis-example-final.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index dd27b7facc..81257dc78e 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -6,12 +6,14 @@ source_filename = "qir/ConstSizeArray.ll" %Array = type opaque %String = type opaque +@0 = internal constant [3 x i8] c"()\00" + define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) + %1 = call %Result* @__quantum__rt__result_get_one() %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) - call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) br i1 %2, label %then0__1, label %continue__1 then0__1: ; preds = %entry @@ -58,7 +60,7 @@ entry: ret void } -define internal fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { entry: %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() @@ -106,11 +108,12 @@ entry: call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) call void 
@__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret %Result* %31 + ret void } declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr @@ -170,10 +173,6 @@ entry: ret void } -declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr - declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr @@ -182,33 +181,29 @@ declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr -define i8 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { entry: - %0 = call fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %1 = call %Result* @__quantum__rt__result_get_zero() - %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) - %not. = xor i1 %2, true - %3 = sext i1 %not. 
to i8 - call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) - ret i8 %3 + call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + ret void } -declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr - define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { entry: - %0 = call fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %1 = call %String* @__quantum__rt__result_to_string(%Result* %0) - call void @__quantum__rt__message(%String* %1) - call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) - call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) ret void } declare void @__quantum__rt__message(%String*) local_unnamed_addr -declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr - declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr attributes #0 = { "InteropFriendly" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.ll b/src/Passes/examples/QubitAllocationAnalysis/test.ll deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/Passes/examples/QubitAllocationAnalysis/test1.ll b/src/Passes/examples/QubitAllocationAnalysis/test1.ll deleted file mode 100644 index ac6d18ca6f..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/test1.ll +++ /dev/null @@ -1,378 +0,0 @@ -; ModuleID = 'analysis-example.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Qubit = type opaque -%Result = type opaque -%Array = type opaque -%String = type 
opaque - -@0 = internal constant [3 x i8] c"()\00" - -define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - %0 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.1(%Qubit* %src) - %1 = call %Result* @__quantum__rt__result_get_one() - %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) - call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) - br i1 %2, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - %3 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.2(%Qubit* %intermediary) - %4 = call %Result* @__quantum__rt__result_get_one() - %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) - call void @__quantum__rt__result_update_reference_count(%Result* %3, i32 -1) - br i1 %5, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) - br label %continue__2 - -continue__2: ; preds = %then0__2, %continue__1 - ret void -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr - -declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr - -define internal fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__z(%Qubit* %qubit) - ret void -} - -define internal fastcc void 
@Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__x(%Qubit* %qubit) - ret void -} - -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { -entry: - %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) - %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %2 = load %Qubit*, %Qubit** %1, align 8 - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %4 = bitcast i8* %3 to %Qubit** - %5 = load %Qubit*, %Qubit** %4, align 8 - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %5) - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) - %7 = bitcast i8* %6 to %Qubit** - %8 = load %Qubit*, %Qubit** %7, align 8 - %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %10 = bitcast i8* %9 to %Qubit** - %11 = load %Qubit*, %Qubit** %10, align 8 - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %8, %Qubit* %11) - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %13 = bitcast i8* %12 to %Qubit** - %14 = load %Qubit*, %Qubit** %13, align 8 - %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %16 = bitcast i8* %15 to %Qubit** - %17 = load %Qubit*, %Qubit** %16, align 8 - 
call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %14, %Qubit* %17) - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %19 = bitcast i8* %18 to %Qubit** - %20 = load %Qubit*, %Qubit** %19, align 8 - %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) - %22 = bitcast i8* %21 to %Qubit** - %23 = load %Qubit*, %Qubit** %22, align 8 - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %25 = bitcast i8* %24 to %Qubit** - %26 = load %Qubit*, %Qubit** %25, align 8 - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) - %27 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3(%Qubit* %leftMessage) - %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %29 = bitcast i8* %28 to %Qubit** - %30 = load %Qubit*, %Qubit** %29, align 8 - %31 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4(%Qubit* %30) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) - call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) - call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) - call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) - call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret void -} - -declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr - -declare void 
@__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -define internal fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) - ret void -} - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr - -define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - call fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) - call fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__h(%Qubit* %qubit) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) unnamed_addr { -entry: - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) - call fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) unnamed_addr { -entry: - call fastcc 
void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - ret void -} - -declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr - -declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr - -declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr - -declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { -entry: - call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.5() - ret void -} - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { -entry: - call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.6() - %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.1(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.2(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc 
%Result* @Microsoft__Quantum__Measurement__MResetZ__body.3(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.5() unnamed_addr { -entry: - %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) - %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %2 = load %Qubit*, %Qubit** %1, align 8 - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %4 = bitcast i8* %3 to %Qubit** - %5 = load %Qubit*, %Qubit** %4, align 8 - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %5) - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) - %7 = bitcast i8* %6 to %Qubit** - %8 = load %Qubit*, %Qubit** %7, align 8 - %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %10 = bitcast i8* %9 to %Qubit** - %11 = load %Qubit*, %Qubit** %10, align 8 - call fastcc void 
@TeleportChain__PrepareEntangledPair__body(%Qubit* %8, %Qubit* %11) - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %13 = bitcast i8* %12 to %Qubit** - %14 = load %Qubit*, %Qubit** %13, align 8 - %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %16 = bitcast i8* %15 to %Qubit** - %17 = load %Qubit*, %Qubit** %16, align 8 - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %14, %Qubit* %17) - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %19 = bitcast i8* %18 to %Qubit** - %20 = load %Qubit*, %Qubit** %19, align 8 - %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) - %22 = bitcast i8* %21 to %Qubit** - %23 = load %Qubit*, %Qubit** %22, align 8 - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %25 = bitcast i8* %24 to %Qubit** - %26 = load %Qubit*, %Qubit** %25, align 8 - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) - %27 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.7(%Qubit* %leftMessage) - %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %29 = bitcast i8* %28 to %Qubit** - %30 = load %Qubit*, %Qubit** %29, align 8 - %31 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.8(%Qubit* %30) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) - call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) - call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) - call void 
@__quantum__rt__qubit_release_array(%Array* %leftPreshared) - call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret void -} - -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.6() unnamed_addr { -entry: - %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() - %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) - %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %2 = load %Qubit*, %Qubit** %1, align 8 - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %4 = bitcast i8* %3 to %Qubit** - %5 = load %Qubit*, %Qubit** %4, align 8 - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %5) - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) - %7 = bitcast i8* %6 to %Qubit** - %8 = load %Qubit*, %Qubit** %7, align 8 - %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %10 = bitcast i8* %9 to %Qubit** - %11 = load %Qubit*, %Qubit** %10, align 8 - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %8, %Qubit* %11) - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) - %13 = bitcast i8* %12 to %Qubit** - %14 = load %Qubit*, %Qubit** %13, align 8 - %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %16 = bitcast i8* %15 to %Qubit** - %17 = load %Qubit*, %Qubit** %16, 
align 8 - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %14, %Qubit* %17) - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) - %19 = bitcast i8* %18 to %Qubit** - %20 = load %Qubit*, %Qubit** %19, align 8 - %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 1) - %22 = bitcast i8* %21 to %Qubit** - %23 = load %Qubit*, %Qubit** %22, align 8 - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %25 = bitcast i8* %24 to %Qubit** - %26 = load %Qubit*, %Qubit** %25, align 8 - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %20, %Qubit* %23, %Qubit* %26) - %27 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.9(%Qubit* %leftMessage) - %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) - %29 = bitcast i8* %28 to %Qubit** - %30 = load %Qubit*, %Qubit** %29, align 8 - %31 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.10(%Qubit* %30) - call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) - call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) - call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) - call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) - call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) - ret void -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.7(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret 
%Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.8(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.9(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.10(%Qubit* %target) unnamed_addr { -entry: - %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/test2.ll b/src/Passes/examples/QubitAllocationAnalysis/test2.ll deleted file mode 100644 index d3a835c836..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/test2.ll +++ /dev/null @@ -1,288 +0,0 @@ -; ModuleID = 'test1.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Qubit = type opaque -%Result = type opaque -%Array = type opaque -%String = type opaque - -@0 = internal constant [3 x i8] c"()\00" - -define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - %0 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.1(%Qubit* %src) - %1 = call i1 @__quantum__qir__read_result(%Result* %0) - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - %2 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.2(%Qubit* 
%intermediary) - %3 = call i1 @__quantum__qir__read_result(%Result* %2) - br i1 %3, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - call fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) - br label %continue__2 - -continue__2: ; preds = %then0__2, %continue__1 - ret void -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 0 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr - -declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr - -define internal fastcc void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__z(%Qubit* %qubit) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__x(%Qubit* %qubit) - ret void -} - -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { -entry: - %leftMessage = inttoptr i64 0 to %Qubit* - %rightMessage = inttoptr i64 1 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = inttoptr i64 0 to %Qubit* - %1 = inttoptr i64 2 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) - %2 = inttoptr i64 1 to %Qubit* - %3 = inttoptr i64 3 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) - %4 = inttoptr i64 0 to %Qubit* - %5 = inttoptr i64 2 to %Qubit* - call fastcc void 
@TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) - %6 = inttoptr i64 2 to %Qubit* - %7 = inttoptr i64 1 to %Qubit* - %8 = inttoptr i64 3 to %Qubit* - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) - %9 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3(%Qubit* %leftMessage) - %10 = inttoptr i64 3 to %Qubit* - %11 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4(%Qubit* %10) - ret void -} - -declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -define internal fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) - ret void -} - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr - -define internal fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { -entry: - call fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) - call fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) unnamed_addr { -entry: - call void @__quantum__qis__h(%Qubit* %qubit) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, 
%Qubit* %target) unnamed_addr { -entry: - call void @__quantum__qis__cnot(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal fastcc void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) - call fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) - ret void -} - -define internal fastcc void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) unnamed_addr { -entry: - call fastcc void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) - ret void -} - -declare void @__quantum__qis__cnot(%Qubit*, %Qubit*) local_unnamed_addr - -declare void @__quantum__qis__h(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr - -declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr - -declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr - -declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { -entry: - call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.5() - ret void -} - -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { -entry: - call void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.6() - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define internal fastcc %Result* 
@Microsoft__Quantum__Measurement__MResetZ__body.1(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 1 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.2(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 2 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 3 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 4 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.5() unnamed_addr { -entry: - %leftMessage = inttoptr i64 4 to %Qubit* - %rightMessage = inttoptr i64 5 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = inttoptr i64 6 to %Qubit* - %1 = inttoptr i64 8 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) - %2 = inttoptr i64 7 to %Qubit* - %3 = inttoptr i64 9 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) - %4 = inttoptr i64 6 to %Qubit* - %5 = inttoptr i64 8 to %Qubit* - call fastcc void 
@TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) - %6 = inttoptr i64 8 to %Qubit* - %7 = inttoptr i64 7 to %Qubit* - %8 = inttoptr i64 9 to %Qubit* - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) - %9 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.7(%Qubit* %leftMessage) - %10 = inttoptr i64 9 to %Qubit* - %11 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.8(%Qubit* %10) - ret void -} - -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.6() unnamed_addr { -entry: - %leftMessage = inttoptr i64 10 to %Qubit* - %rightMessage = inttoptr i64 11 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) - %0 = inttoptr i64 12 to %Qubit* - %1 = inttoptr i64 14 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %0, %Qubit* %1) - %2 = inttoptr i64 13 to %Qubit* - %3 = inttoptr i64 15 to %Qubit* - call fastcc void @TeleportChain__PrepareEntangledPair__body(%Qubit* %2, %Qubit* %3) - %4 = inttoptr i64 12 to %Qubit* - %5 = inttoptr i64 14 to %Qubit* - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %4, %Qubit* %5) - %6 = inttoptr i64 14 to %Qubit* - %7 = inttoptr i64 13 to %Qubit* - %8 = inttoptr i64 15 to %Qubit* - call fastcc void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %6, %Qubit* %7, %Qubit* %8) - %9 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.9(%Qubit* %leftMessage) - %10 = inttoptr i64 15 to %Qubit* - %11 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.10(%Qubit* %10) - ret void -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.7(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 5 to 
%Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.8(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 6 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.3.9(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 7 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -define internal fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body.4.10(%Qubit* %target) unnamed_addr { -entry: - %result = inttoptr i64 8 to %Result* - call void @__quantum__qis__mz__body(%Qubit* %target, %Result* %result) - call void @__quantum__qis__reset__body(%Qubit* %target) - ret %Result* %result -} - -declare i1 @__quantum__qir__read_result(%Result*) - -declare void @__quantum__qis__mz__body(%Qubit*, %Result*) - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } From 9604ef63c31e091d0b36fc209aec7483425577d5 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 13 Aug 2021 14:45:08 +0200 Subject: [PATCH 089/106] Refactor location of operands --- src/Passes/Source/CMakeLists.txt | 4 +- src/Passes/Source/Rules/OperandPrototype.cpp | 84 ------------------- src/Passes/Source/Rules/OperandPrototype.hpp | 39 --------- src/Passes/Source/Rules/Operands/Any.cpp | 23 +++++ src/Passes/Source/Rules/Operands/Any.hpp | 24 ++++++ src/Passes/Source/Rules/Operands/Call.cpp | 44 ++++++++++ src/Passes/Source/Rules/Operands/Call.hpp | 30 +++++++ .../Source/Rules/Operands/Instruction.cpp | 52 ++++++++++++ .../Source/Rules/Operands/Instruction.hpp | 30 +++++++ src/Passes/Source/Rules/ReplacementRule.hpp | 4 +- 10 files changed, 209 insertions(+), 125 deletions(-) create mode 100644 src/Passes/Source/Rules/Operands/Any.cpp create mode 100644 src/Passes/Source/Rules/Operands/Any.hpp create mode 100644 src/Passes/Source/Rules/Operands/Call.cpp create mode 100644 src/Passes/Source/Rules/Operands/Call.hpp create mode 100644 src/Passes/Source/Rules/Operands/Instruction.cpp create mode 100644 src/Passes/Source/Rules/Operands/Instruction.hpp diff --git a/src/Passes/Source/CMakeLists.txt b/src/Passes/Source/CMakeLists.txt index bee8473e9a..8ce3881d90 100644 --- a/src/Passes/Source/CMakeLists.txt +++ b/src/Passes/Source/CMakeLists.txt @@ -33,10 +33,12 @@ target_link_libraries(AllocationManager # Creating the rules library file(GLOB RULES_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/*.cpp) +file(GLOB RULES_OPRANDS_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/Oprands/*.cpp) add_library(Rules SHARED - ${RULES_SOURCE} ) + ${RULES_SOURCE} + ${RULES_OPRANDS_SOURCE} ) target_include_directories( Rules diff --git a/src/Passes/Source/Rules/OperandPrototype.cpp b/src/Passes/Source/Rules/OperandPrototype.cpp index 9ed3e3c3be..31d4a68194 100644 --- a/src/Passes/Source/Rules/OperandPrototype.cpp +++ b/src/Passes/Source/Rules/OperandPrototype.cpp @@ -87,89 +87,5 @@ void 
OperandPrototype::uncapture(Value * /*value*/, Captures &captures) const } } -CallPattern::CallPattern(String const &name) - : name_{name} -{} - -CallPattern::~CallPattern() = default; - -bool CallPattern::match(Value *instr, Captures &captures) const -{ - auto *call_instr = llvm::dyn_cast(instr); - if (call_instr == nullptr) - { - return fail(instr, captures); - } - - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); - - if (name != name_) - { - return fail(instr, captures); - } - - return success(instr, captures); -} - -CallPattern::Child CallPattern::copy() const -{ - auto ret = std::make_shared(name_); - ret->copyPropertiesFrom(*this); - return std::move(ret); -} - -AnyPattern::AnyPattern() = default; -AnyPattern::~AnyPattern() = default; -bool AnyPattern::match(Value *instr, Captures &captures) const -{ - return success(instr, captures); -} - -AnyPattern::Child AnyPattern::copy() const -{ - return std::make_shared(); -} - -template -InstructionPattern::~InstructionPattern() = default; -template -bool InstructionPattern::match(Value *instr, Captures &captures) const -{ - auto *load_instr = llvm::dyn_cast(instr); - if (load_instr == nullptr) - { - return fail(instr, captures); - } - - return success(instr, captures); -} - -template -typename InstructionPattern::Child InstructionPattern::copy() const -{ - auto ret = std::make_shared>(); - ret->copyPropertiesFrom(*this); - return std::move(ret); -} - -// TODO(tfr): This seems to be a bug in LLVM. Template instantiations in -// a single translation unit is not supposed to reinstantiate across other -// translation units. -// -// However, it is suspecious that htis problem has been around since Clang 8. -// so this needs more investigation. 
For now, this work around suffices -// See -// https://bugs.llvm.org/show_bug.cgi?id=18733 -// https://stackoverflow.com/questions/56041900/why-does-explicit-template-instantiation-result-in-weak-template-vtables-warning -// for more information -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wweak-template-vtables" -template class InstructionPattern; -template class InstructionPattern; -template class InstructionPattern; -template class InstructionPattern; -#pragma clang diagnostic pop - } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/Rules/OperandPrototype.hpp b/src/Passes/Source/Rules/OperandPrototype.hpp index c365ce4e6d..baab484234 100644 --- a/src/Passes/Source/Rules/OperandPrototype.hpp +++ b/src/Passes/Source/Rules/OperandPrototype.hpp @@ -81,44 +81,5 @@ class OperandPrototype /// @} }; -class AnyPattern : public OperandPrototype -{ -public: - AnyPattern(); - ~AnyPattern() override; - bool match(Value *instr, Captures &captures) const override; - Child copy() const override; -}; - -class CallPattern : public OperandPrototype -{ -public: - using String = std::string; - CallPattern(String const &name); - - ~CallPattern() override; - - bool match(Value *instr, Captures &captures) const override; - Child copy() const override; - -private: - String name_{}; -}; - -template -class InstructionPattern : public OperandPrototype -{ -public: - using OperandPrototype::OperandPrototype; - ~InstructionPattern() override; - bool match(Value *instr, Captures &captures) const override; - Child copy() const override; -}; - -using StorePattern = InstructionPattern; -using LoadPattern = InstructionPattern; -using BitCastPattern = InstructionPattern; -using BranchPattern = InstructionPattern; - } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Any.cpp b/src/Passes/Source/Rules/Operands/Any.cpp new file mode 100644 index 0000000000..69e8cbc40d --- /dev/null +++ 
b/src/Passes/Source/Rules/Operands/Any.cpp @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +#include "Rules/Operands/Any.hpp" + +#include "Rules/OperandPrototype.hpp" + +namespace microsoft { +namespace quantum { + +AnyPattern::AnyPattern() = default; +AnyPattern::~AnyPattern() = default; +bool AnyPattern::match(Value *instr, Captures &captures) const +{ + return success(instr, captures); +} + +AnyPattern::Child AnyPattern::copy() const +{ + return std::make_shared(); +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Any.hpp b/src/Passes/Source/Rules/Operands/Any.hpp new file mode 100644 index 0000000000..a046ee4ed0 --- /dev/null +++ b/src/Passes/Source/Rules/Operands/Any.hpp @@ -0,0 +1,24 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" +#include "Rules/OperandPrototype.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { + +class AnyPattern : public OperandPrototype +{ +public: + AnyPattern(); + ~AnyPattern() override; + bool match(Value *instr, Captures &captures) const override; + Child copy() const override; +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Call.cpp b/src/Passes/Source/Rules/Operands/Call.cpp new file mode 100644 index 0000000000..863b057710 --- /dev/null +++ b/src/Passes/Source/Rules/Operands/Call.cpp @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Rules/Operands/Call.hpp" + +#include "Rules/OperandPrototype.hpp" + +namespace microsoft { +namespace quantum { + +CallPattern::CallPattern(String const &name) + : name_{name} +{} + +CallPattern::~CallPattern() = default; + +bool CallPattern::match(Value *instr, Captures &captures) const +{ + auto *call_instr = llvm::dyn_cast(instr); + if (call_instr == nullptr) + { + return fail(instr, captures); + } + + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + + if (name != name_) + { + return fail(instr, captures); + } + + return success(instr, captures); +} + +CallPattern::Child CallPattern::copy() const +{ + auto ret = std::make_shared(name_); + ret->copyPropertiesFrom(*this); + return std::move(ret); +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Call.hpp b/src/Passes/Source/Rules/Operands/Call.hpp new file mode 100644 index 0000000000..f5c5aac931 --- /dev/null +++ b/src/Passes/Source/Rules/Operands/Call.hpp @@ -0,0 +1,30 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" +#include "Rules/OperandPrototype.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { + +class CallPattern : public OperandPrototype +{ +public: + using String = std::string; + CallPattern(String const &name); + + ~CallPattern() override; + + bool match(Value *instr, Captures &captures) const override; + Child copy() const override; + +private: + String name_{}; +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Instruction.cpp b/src/Passes/Source/Rules/Operands/Instruction.cpp new file mode 100644 index 0000000000..320a1e72cf --- /dev/null +++ b/src/Passes/Source/Rules/Operands/Instruction.cpp @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Rules/Operands/Instruction.hpp" + +#include "Rules/OperandPrototype.hpp" + +namespace microsoft { +namespace quantum { + +template +InstructionPattern::~InstructionPattern() = default; +template +bool InstructionPattern::match(Value *instr, Captures &captures) const +{ + auto *load_instr = llvm::dyn_cast(instr); + if (load_instr == nullptr) + { + return fail(instr, captures); + } + + return success(instr, captures); +} + +template +typename InstructionPattern::Child InstructionPattern::copy() const +{ + auto ret = std::make_shared>(); + ret->copyPropertiesFrom(*this); + return std::move(ret); +} + +// TODO(tfr): This seems to be a bug in LLVM. Template instantiations in +// a single translation unit is not supposed to reinstantiate across other +// translation units. +// +// However, it is suspecious that htis problem has been around since Clang 8. +// so this needs more investigation. For now, this work around suffices +// See +// https://bugs.llvm.org/show_bug.cgi?id=18733 +// https://stackoverflow.com/questions/56041900/why-does-explicit-template-instantiation-result-in-weak-template-vtables-warning +// for more information +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wweak-template-vtables" +template class InstructionPattern; +template class InstructionPattern; +template class InstructionPattern; +template class InstructionPattern; +#pragma clang diagnostic pop + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Instruction.hpp b/src/Passes/Source/Rules/Operands/Instruction.hpp new file mode 100644 index 0000000000..a116ad7007 --- /dev/null +++ b/src/Passes/Source/Rules/Operands/Instruction.hpp @@ -0,0 +1,30 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm/Llvm.hpp" +#include "Rules/OperandPrototype.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { + +template +class InstructionPattern : public OperandPrototype +{ +public: + using OperandPrototype::OperandPrototype; + ~InstructionPattern() override; + bool match(Value *instr, Captures &captures) const override; + Child copy() const override; +}; + +using StorePattern = InstructionPattern; +using LoadPattern = InstructionPattern; +using BitCastPattern = InstructionPattern; +using BranchPattern = InstructionPattern; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index ca5af9052d..a000dedcaa 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -3,7 +3,9 @@ // Licensed under the MIT License. #include "Llvm/Llvm.hpp" -#include "Rules/OperandPrototype.hpp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" #include #include From 370e8a2f89dc67a99e2c99b3ea0d0517d2ac0ee4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 13 Aug 2021 18:02:13 +0200 Subject: [PATCH 090/106] More refactor --- src/Passes/Source/Apps/CMakeLists.txt | 2 +- src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp | 4 +- src/Passes/Source/CMakeLists.txt | 6 +- src/Passes/Source/Rules/Factory.cpp | 3 +- src/Passes/Source/Rules/Notation/BitCast.cpp | 29 +++++ src/Passes/Source/Rules/Notation/Branch.cpp | 33 ++++++ src/Passes/Source/Rules/Notation/Call.ipp | 41 +++++++ .../Source/Rules/Notation/CallByNameOnly.cpp | 27 +++++ src/Passes/Source/Rules/Notation/Capture.cpp | 37 ++++++ src/Passes/Source/Rules/Notation/Load.cpp | 29 +++++ src/Passes/Source/Rules/Notation/Notation.cpp | 31 +++++ src/Passes/Source/Rules/Notation/Notation.hpp | 56 +++++++++ src/Passes/Source/Rules/Notation/Store.cpp | 28 +++++ src/Passes/Source/Rules/ReplacementRule.hpp | 112 ------------------ 14 files changed, 319 insertions(+), 119 deletions(-) create mode 100644 src/Passes/Source/Rules/Notation/BitCast.cpp create mode 100644 src/Passes/Source/Rules/Notation/Branch.cpp create mode 100644 src/Passes/Source/Rules/Notation/Call.ipp create mode 100644 src/Passes/Source/Rules/Notation/CallByNameOnly.cpp create mode 100644 src/Passes/Source/Rules/Notation/Capture.cpp create mode 100644 src/Passes/Source/Rules/Notation/Load.cpp create mode 100644 src/Passes/Source/Rules/Notation/Notation.cpp create mode 100644 src/Passes/Source/Rules/Notation/Notation.hpp create mode 100644 src/Passes/Source/Rules/Notation/Store.cpp diff --git a/src/Passes/Source/Apps/CMakeLists.txt b/src/Passes/Source/Apps/CMakeLists.txt index 3162c7e35f..471cae5f5b 100644 --- a/src/Passes/Source/Apps/CMakeLists.txt +++ b/src/Passes/Source/Apps/CMakeLists.txt @@ -1,4 +1,4 @@ -add_executable(qat Qat/Qat.cpp) +add_executable(qat Qat/Qat.cpp Qat/LlvmAnalysis.cpp) target_link_libraries(qat ${llvm_libs}) target_link_libraries(qat ExpandStaticAllocation QubitAllocationAnalysis TransformationRule Rules AllocationManager Commandline Profiles) \ No newline at end of file 
diff --git a/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp b/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp index e4fb7ae980..4ff608ff0e 100644 --- a/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp +++ b/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp @@ -1,4 +1,3 @@ -#pragma once // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. @@ -12,9 +11,8 @@ namespace quantum { LlvmAnalyser::LlvmAnalyser(bool debug) : loop_analysis_manager{debug} , function_analysis_manager{debug} - , cGSCCAnalysisManager{debug} + , gscc_analysis_manager{debug} , module_analysis_manager{debug} - , { pass_builder.registerModuleAnalyses(module_analysis_manager); pass_builder.registerCGSCCAnalyses(gscc_analysis_manager); diff --git a/src/Passes/Source/CMakeLists.txt b/src/Passes/Source/CMakeLists.txt index 8ce3881d90..4830ee92f7 100644 --- a/src/Passes/Source/CMakeLists.txt +++ b/src/Passes/Source/CMakeLists.txt @@ -33,12 +33,14 @@ target_link_libraries(AllocationManager # Creating the rules library file(GLOB RULES_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/*.cpp) -file(GLOB RULES_OPRANDS_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/Oprands/*.cpp) +file(GLOB RULES_OPRANDS_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/Operands/*.cpp) +file(GLOB RULES_NOTATION_SOURCE RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/Rules/Notation/*.cpp) add_library(Rules SHARED ${RULES_SOURCE} - ${RULES_OPRANDS_SOURCE} ) + ${RULES_OPRANDS_SOURCE} + ${RULES_NOTATION_SOURCE} ) target_include_directories( Rules diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp index 55ba1fa2d9..635af5306c 100644 --- a/src/Passes/Source/Rules/Factory.cpp +++ b/src/Passes/Source/Rules/Factory.cpp @@ -4,11 +4,12 @@ #include "Rules/Factory.hpp" #include "Llvm/Llvm.hpp" +#include "Rules/Notation/Notation.hpp" namespace microsoft { namespace quantum { using ReplacementRulePtr = 
RuleFactory::ReplacementRulePtr; -using namespace microsoft::quantum::patterns; +using namespace microsoft::quantum::notation; RuleFactory::RuleFactory(RuleSet &rule_set) : rule_set_{rule_set} diff --git a/src/Passes/Source/Rules/Notation/BitCast.cpp b/src/Passes/Source/Rules/Notation/BitCast.cpp new file mode 100644 index 0000000000..9e854bae1e --- /dev/null +++ b/src/Passes/Source/Rules/Notation/BitCast.cpp @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" +#include "Rules/Notation/Notation.hpp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +using OperandPrototypePtr = std::shared_ptr; + +OperandPrototypePtr BitCast(OperandPrototypePtr arg) +{ + auto cast_pattern = std::make_shared(); + + cast_pattern->addChild(arg); + return static_cast(cast_pattern); +} + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Branch.cpp b/src/Passes/Source/Rules/Notation/Branch.cpp new file mode 100644 index 0000000000..d7889f3476 --- /dev/null +++ b/src/Passes/Source/Rules/Notation/Branch.cpp @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm/Llvm.hpp" +#include "Rules/Notation/Notation.hpp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +using OperandPrototypePtr = std::shared_ptr; + +OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, + OperandPrototypePtr arg2) +{ + auto branch_pattern = std::make_shared(); + + branch_pattern->addChild(cond); + branch_pattern->addChild(arg1); + branch_pattern->addChild(arg2); + + return static_cast(branch_pattern); +} + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Call.ipp b/src/Passes/Source/Rules/Notation/Call.ipp new file mode 100644 index 0000000000..da701e49c8 --- /dev/null +++ b/src/Passes/Source/Rules/Notation/Call.ipp @@ -0,0 +1,41 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" +#include "Rules/Notation/Notation.hpp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" +#include "Rules/ReplacementRule.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +using OperandPrototypePtr = std::shared_ptr; + +template +OperandPrototypePtr Call(std::string const &name, Args... 
args) +{ + OperandPrototypePtr ret = std::make_shared(name); + std::vector arguments{args...}; + + // Adding arguments to matching + for (auto &a : arguments) + { + ret->addChild(a); + } + + // Function name is kept in the last operand + ret->addChild(std::make_shared()); + + return ret; +} + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp b/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp new file mode 100644 index 0000000000..87cf97d886 --- /dev/null +++ b/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" +#include "Rules/Notation/Notation.hpp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +using OperandPrototypePtr = std::shared_ptr; + +OperandPrototypePtr CallByNameOnly(std::string const &name) +{ + OperandPrototypePtr ret = std::make_shared(name); + return ret; +} + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Capture.cpp b/src/Passes/Source/Rules/Notation/Capture.cpp new file mode 100644 index 0000000000..867c438285 --- /dev/null +++ b/src/Passes/Source/Rules/Notation/Capture.cpp @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm/Llvm.hpp" +#include "Rules/Notation/Notation.hpp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +using OperandPrototypePtr = std::shared_ptr; + +Capture::Capture(std::string const &name) + : name_{name} +{} + +OperandPrototypePtr Capture::operator=(OperandPrototypePtr const &other) +{ + auto ret = other->copy(); + ret->enableCapture(name_); + return ret; +} + +Capture operator""_cap(char const *name, std::size_t) +{ + return Capture(name); +} + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Load.cpp b/src/Passes/Source/Rules/Notation/Load.cpp new file mode 100644 index 0000000000..1f0711677b --- /dev/null +++ b/src/Passes/Source/Rules/Notation/Load.cpp @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" +#include "Rules/Notation/Notation.hpp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +using OperandPrototypePtr = std::shared_ptr; + +OperandPrototypePtr Load(OperandPrototypePtr arg) +{ + auto ret = std::make_shared(); + + ret->addChild(arg); + return static_cast(ret); +} + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Notation.cpp b/src/Passes/Source/Rules/Notation/Notation.cpp new file mode 100644 index 0000000000..a72f33a49e --- /dev/null +++ b/src/Passes/Source/Rules/Notation/Notation.cpp @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Rules/Notation/Notation.hpp" + +#include "Llvm/Llvm.hpp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +std::function +deleteInstruction() +{ + return [](ReplacementRule::Builder &, ReplacementRule::Value *val, ReplacementRule::Captures &, + ReplacementRule::Replacements &replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }; +} + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Notation.hpp b/src/Passes/Source/Rules/Notation/Notation.hpp new file mode 100644 index 0000000000..f638099848 --- /dev/null +++ b/src/Passes/Source/Rules/Notation/Notation.hpp @@ -0,0 +1,56 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" +#include "Rules/Notation/Call.ipp" +#include "Rules/Operands/Any.hpp" +#include "Rules/Operands/Call.hpp" +#include "Rules/Operands/Instruction.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +using OperandPrototypePtr = std::shared_ptr; + +class Capture +{ +public: + Capture(std::string const &name); + OperandPrototypePtr operator=(OperandPrototypePtr const &other); + +private: + std::string name_{}; +}; + +/// @{ +template +OperandPrototypePtr Call(std::string const &name, Args... 
args); +OperandPrototypePtr CallByNameOnly(std::string const &name); +OperandPrototypePtr BitCast(OperandPrototypePtr arg); +OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, + OperandPrototypePtr arg2); +OperandPrototypePtr Load(OperandPrototypePtr arg); +OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value); +/// @} + +/// @{ +static std::shared_ptr _ = std::make_shared(); +/// @} + +/// @{ +std::function +deleteInstruction(); + +/// @} + +Capture operator""_cap(char const *name, std::size_t); + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Store.cpp b/src/Passes/Source/Rules/Notation/Store.cpp new file mode 100644 index 0000000000..2c8379274f --- /dev/null +++ b/src/Passes/Source/Rules/Notation/Store.cpp @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" +#include "Rules/Notation/Notation.hpp" +#include "Rules/Operands/Instruction.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +namespace notation { + +using OperandPrototypePtr = std::shared_ptr; + +OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value) +{ + auto ret = std::make_shared(); + + ret->addChild(target); + ret->addChild(value); + return static_cast(ret); +} + +} // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index a000dedcaa..dc5a8c9b5f 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -62,117 +62,5 @@ class ReplacementRule ReplaceFunction replacer_{nullptr}; }; -namespace patterns { -using OperandPrototypePtr = std::shared_ptr; - -/// @{ -template -inline OperandPrototypePtr Call(std::string const &name, Args... 
args); -inline OperandPrototypePtr CallByNameOnly(std::string const &name); -inline OperandPrototypePtr BitCast(OperandPrototypePtr arg); -inline OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, - OperandPrototypePtr arg2); -inline OperandPrototypePtr Load(OperandPrototypePtr arg); -inline OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value); -/// @} - -template -inline OperandPrototypePtr Call(std::string const &name, Args... args) -{ - OperandPrototypePtr ret = std::make_shared(name); - std::vector arguments{args...}; - - // Adding arguments to matching - for (auto &a : arguments) - { - ret->addChild(a); - } - - // Function name is kept in the last operand - ret->addChild(std::make_shared()); - - return ret; -} - -inline OperandPrototypePtr CallByNameOnly(std::string const &name) -{ - OperandPrototypePtr ret = std::make_shared(name); - return ret; -} - -inline OperandPrototypePtr BitCast(OperandPrototypePtr arg) -{ - auto cast_pattern = std::make_shared(); - - cast_pattern->addChild(arg); - return static_cast(cast_pattern); -} - -inline OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, - OperandPrototypePtr arg2) -{ - auto branch_pattern = std::make_shared(); - - branch_pattern->addChild(cond); - branch_pattern->addChild(arg1); - branch_pattern->addChild(arg2); - - return static_cast(branch_pattern); -} - -inline OperandPrototypePtr Load(OperandPrototypePtr arg) -{ - auto ret = std::make_shared(); - - ret->addChild(arg); - return static_cast(ret); -} - -inline OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value) -{ - auto ret = std::make_shared(); - - ret->addChild(target); - ret->addChild(value); - return static_cast(ret); -} -static std::shared_ptr _ = std::make_shared(); - -class Capture -{ -public: - Capture(std::string const &name) - : name_{name} - {} - - OperandPrototypePtr operator=(OperandPrototypePtr const &other) - { - auto ret = 
other->copy(); - ret->enableCapture(name_); - return ret; - } - -private: - std::string name_{}; -}; - -inline Capture operator""_cap(char const *name, std::size_t) -{ - return Capture(name); -} - -inline std::function -deleteInstruction() -{ - return [](ReplacementRule::Builder &, ReplacementRule::Value *val, ReplacementRule::Captures &, - ReplacementRule::Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }; -} - -} // namespace patterns - } // namespace quantum } // namespace microsoft From bdfc6f59f13e2e8777f8ad3fcc36187810f58a8e Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Fri, 13 Aug 2021 18:10:01 +0200 Subject: [PATCH 091/106] Fixing style --- .../AllocationManager/AllocationManager.cpp | 168 +++--- .../AllocationManager/AllocationManager.hpp | 167 ++--- src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp | 36 +- src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp | 72 +-- src/Passes/Source/Apps/Qat/Qat.cpp | 163 ++--- .../Source/Commandline/ParameterParser.cpp | 153 ++--- .../Source/Commandline/ParameterParser.hpp | 118 ++-- src/Passes/Source/Commandline/Settings.hpp | 105 ++-- .../ExpandStaticAllocation.cpp | 290 ++++----- .../ExpandStaticAllocation.hpp | 75 +-- .../LibExpandStaticAllocation.cpp | 39 +- .../LibQubitAllocationAnalysis.cpp | 61 +- .../QubitAllocationAnalysis.cpp | 135 +++-- .../QubitAllocationAnalysis.hpp | 115 ++-- .../ResourceRemapper/LibResourceRemapper.cpp | 39 +- .../ResourceRemapper/ResourceRemapper.cpp | 29 +- .../ResourceRemapper/ResourceRemapper.hpp | 52 +- .../LibTransformationRule.cpp | 77 +-- .../TransformationRule/TransformationRule.cpp | 142 ++--- .../TransformationRule/TransformationRule.hpp | 83 +-- src/Passes/Source/Profiles/BaseProfile.cpp | 128 ++-- src/Passes/Source/Profiles/BaseProfile.hpp | 38 +- src/Passes/Source/Profiles/IProfile.cpp | 15 +- src/Passes/Source/Profiles/IProfile.hpp | 46 +- src/Passes/Source/Rules/Factory.cpp | 571 +++++++++--------- 
src/Passes/Source/Rules/Factory.hpp | 141 ++--- src/Passes/Source/Rules/Notation/BitCast.cpp | 32 +- src/Passes/Source/Rules/Notation/Branch.cpp | 37 +- .../Source/Rules/Notation/CallByNameOnly.cpp | 30 +- src/Passes/Source/Rules/Notation/Capture.cpp | 53 +- src/Passes/Source/Rules/Notation/Load.cpp | 32 +- src/Passes/Source/Rules/Notation/Notation.cpp | 42 +- src/Passes/Source/Rules/Notation/Notation.hpp | 91 +-- src/Passes/Source/Rules/Notation/Store.cpp | 34 +- src/Passes/Source/Rules/OperandPrototype.cpp | 146 ++--- src/Passes/Source/Rules/OperandPrototype.hpp | 136 +++-- src/Passes/Source/Rules/Operands/Any.cpp | 32 +- src/Passes/Source/Rules/Operands/Any.hpp | 29 +- src/Passes/Source/Rules/Operands/Call.cpp | 74 +-- src/Passes/Source/Rules/Operands/Call.hpp | 35 +- .../Source/Rules/Operands/Instruction.cpp | 58 +- .../Source/Rules/Operands/Instruction.hpp | 38 +- src/Passes/Source/Rules/ReplacementRule.cpp | 78 +-- src/Passes/Source/Rules/ReplacementRule.hpp | 96 +-- src/Passes/Source/Rules/RuleSet.cpp | 56 +- src/Passes/Source/Rules/RuleSet.hpp | 101 ++-- 46 files changed, 2211 insertions(+), 2077 deletions(-) diff --git a/src/Passes/Source/AllocationManager/AllocationManager.cpp b/src/Passes/Source/AllocationManager/AllocationManager.cpp index e09fb0bfed..2c150eac4b 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.cpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.cpp @@ -7,97 +7,99 @@ #include #include -namespace microsoft { -namespace quantum { - -AllocationManager::AllocationManagerPtr AllocationManager::createNew() +namespace microsoft +{ +namespace quantum { - AllocationManagerPtr ret; - ret.reset(new AllocationManager()); - return ret; -} + AllocationManager::AllocationManagerPtr AllocationManager::createNew() + { + AllocationManagerPtr ret; + ret.reset(new AllocationManager()); -AllocationManager::Index AllocationManager::allocate() -{ - auto ret = start_; - ++start_; - return ret; -} + return ret; + } -void 
AllocationManager::allocate(String const &name, Index const &size, bool value_only) -{ - if (resources_.find(name) != resources_.end()) - { - throw std::runtime_error("Resource with name " + name + " already exists."); - } - - resources_[name].resize(size); - for (auto &v : resources_[name]) - { - v = nullptr; - } - - // Creating a memory segment mappign in case we are dealing with qubits - if (!value_only) - { - MemoryMapping map; - map.name = name; - map.index = mappings_.size(); - map.size = size; - - if (name_to_index_.find(map.name) != name_to_index_.end()) + AllocationManager::Index AllocationManager::allocate() { - throw std::runtime_error("Memory segment with name " + map.name + " already exists."); + auto ret = start_; + ++start_; + return ret; } - name_to_index_[map.name] = map.index; - map.start = start_; - start_ += size; + void AllocationManager::allocate(String const& name, Index const& size, bool value_only) + { + if (resources_.find(name) != resources_.end()) + { + throw std::runtime_error("Resource with name " + name + " already exists."); + } + + resources_[name].resize(size); + for (auto& v : resources_[name]) + { + v = nullptr; + } + + // Creating a memory segment mappign in case we are dealing with qubits + if (!value_only) + { + MemoryMapping map; + map.name = name; + map.index = mappings_.size(); + map.size = size; + + if (name_to_index_.find(map.name) != name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + map.name + " already exists."); + } + + name_to_index_[map.name] = map.index; + map.start = start_; + start_ += size; + + map.end = map.start + size; + mappings_.emplace_back(std::move(map)); + } + } - map.end = map.start + size; - mappings_.emplace_back(std::move(map)); - } -} + AllocationManager::Resource& AllocationManager::get(String const& name) + { + auto it = resources_.find(name); + if (it == resources_.end()) + { + throw std::runtime_error("Resource with name " + name + " does not exists."); + } + 
return it->second; + } -AllocationManager::Resource &AllocationManager::get(String const &name) -{ - auto it = resources_.find(name); - if (it == resources_.end()) - { - throw std::runtime_error("Resource with name " + name + " does not exists."); - } - return it->second; -} - -AllocationManager::Index AllocationManager::getOffset(String const &name) const -{ - auto it = name_to_index_.find(name); - if (it == name_to_index_.end()) - { - throw std::runtime_error("Memory segment with name " + name + " not found."); - } - auto index = it->second; + AllocationManager::Index AllocationManager::getOffset(String const& name) const + { + auto it = name_to_index_.find(name); + if (it == name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + name + " not found."); + } + auto index = it->second; + + return mappings_[index].start; + } - return mappings_[index].start; -} + void AllocationManager::release(String const& name) + { + auto it = name_to_index_.find(name); + if (it == name_to_index_.end()) + { + throw std::runtime_error("Memory segment with name " + name + " not found."); + } + name_to_index_.erase(it); + + auto it2 = resources_.find(name); + if (it2 == resources_.end()) + { + throw std::runtime_error("Resource with name " + name + " does not exists."); + } + resources_.erase(it2); + } -void AllocationManager::release(String const &name) -{ - auto it = name_to_index_.find(name); - if (it == name_to_index_.end()) - { - throw std::runtime_error("Memory segment with name " + name + " not found."); - } - name_to_index_.erase(it); - - auto it2 = resources_.find(name); - if (it2 == resources_.end()) - { - throw std::runtime_error("Resource with name " + name + " does not exists."); - } - resources_.erase(it2); -} - -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/AllocationManager/AllocationManager.hpp b/src/Passes/Source/AllocationManager/AllocationManager.hpp 
index 909736614f..5d4dc3753c 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.hpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.hpp @@ -1,6 +1,7 @@ #pragma once // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. + #include "Llvm/Llvm.hpp" #include @@ -8,86 +9,88 @@ #include #include -namespace microsoft { -namespace quantum { - -class AllocationManager +namespace microsoft { -public: - /// Defines a named register/memory segment with start - /// position, end position and size. - struct MemoryMapping - { - using Index = uint64_t; - using String = std::string; - - String name{""}; ///< Name of the segment, if any given - Index index{0}; ///< Index of the allocation - Index size{0}; ///< Size of memory segment - Index start{0}; ///< Start index of memory segment - Index end{0}; ///< Index not included in memory segment - }; - - using Index = uint64_t; - using String = std::string; - using AllocationManagerPtr = std::shared_ptr; - using Resource = std::vector; - using Resources = std::unordered_map; - using NameToIndex = std::unordered_map; - using Mappings = std::vector; - - /// Pointer contstruction - /// @{ - /// Creates a new allocation manager. The manager is kept - /// as a shared pointer to enable allocation accross diffent - /// passes and/or replacement rules. - static AllocationManagerPtr createNew(); - /// @} - - /// Allocation and release functions - /// @{ - /// Allocates a single address. - Index allocate(); - - /// Allocates a name segment of a given size. - void allocate(String const &name, Index const &size, bool value_only = false); - - /// Gets the offset of a name segment or address. - Index getOffset(String const &name) const; - - /// Releases the named segment or address. - void release(String const &name); - - /// Retrieves a named resource. 
- Resource &get(String const &name); - /// @} - -private: - /// Private constructors - /// @{ - /// Public construction of this object is only allowed - /// as a shared pointer. To create a new AllocationManager, - /// use AllocationManager::createNew(). - AllocationManager() = default; - /// @} - - /// Memory mapping - /// @{ - /// Each allocation has a register/memory mapping which - /// keeps track of the - NameToIndex name_to_index_; - Mappings mappings_; - /// @} - - /// Compile-time resources - /// @{ - /// Compile-time allocated resources such as - /// arrays who - Resources resources_; - /// @} - - Index start_{0}; -}; - -} // namespace quantum -} // namespace microsoft +namespace quantum +{ + + class AllocationManager + { + public: + /// Defines a named register/memory segment with start + /// position, end position and size. + struct MemoryMapping + { + using Index = uint64_t; + using String = std::string; + + String name{""}; ///< Name of the segment, if any given + Index index{0}; ///< Index of the allocation + Index size{0}; ///< Size of memory segment + Index start{0}; ///< Start index of memory segment + Index end{0}; ///< Index not included in memory segment + }; + + using Index = uint64_t; + using String = std::string; + using AllocationManagerPtr = std::shared_ptr; + using Resource = std::vector; + using Resources = std::unordered_map; + using NameToIndex = std::unordered_map; + using Mappings = std::vector; + + /// Pointer contstruction + /// @{ + /// Creates a new allocation manager. The manager is kept + /// as a shared pointer to enable allocation accross diffent + /// passes and/or replacement rules. + static AllocationManagerPtr createNew(); + /// @} + + /// Allocation and release functions + /// @{ + /// Allocates a single address. + Index allocate(); + + /// Allocates a name segment of a given size. + void allocate(String const& name, Index const& size, bool value_only = false); + + /// Gets the offset of a name segment or address. 
+ Index getOffset(String const& name) const; + + /// Releases the named segment or address. + void release(String const& name); + + /// Retrieves a named resource. + Resource& get(String const& name); + /// @} + + private: + /// Private constructors + /// @{ + /// Public construction of this object is only allowed + /// as a shared pointer. To create a new AllocationManager, + /// use AllocationManager::createNew(). + AllocationManager() = default; + /// @} + + /// Memory mapping + /// @{ + /// Each allocation has a register/memory mapping which + /// keeps track of the + NameToIndex name_to_index_; + Mappings mappings_; + /// @} + + /// Compile-time resources + /// @{ + /// Compile-time allocated resources such as + /// arrays who + Resources resources_; + /// @} + + Index start_{0}; + }; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp b/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp index 4ff608ff0e..4a0b5b32bf 100644 --- a/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp +++ b/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp @@ -5,23 +5,25 @@ #include "Llvm/Llvm.hpp" -namespace microsoft { -namespace quantum { - -LlvmAnalyser::LlvmAnalyser(bool debug) - : loop_analysis_manager{debug} - , function_analysis_manager{debug} - , gscc_analysis_manager{debug} - , module_analysis_manager{debug} +namespace microsoft +{ +namespace quantum { - pass_builder.registerModuleAnalyses(module_analysis_manager); - pass_builder.registerCGSCCAnalyses(gscc_analysis_manager); - pass_builder.registerFunctionAnalyses(function_analysis_manager); - pass_builder.registerLoopAnalyses(loop_analysis_manager); - pass_builder.crossRegisterProxies(loop_analysis_manager, function_analysis_manager, - gscc_analysis_manager, module_analysis_manager); -} + LlvmAnalyser::LlvmAnalyser(bool debug) + : loop_analysis_manager{debug} + , function_analysis_manager{debug} + , gscc_analysis_manager{debug} + , module_analysis_manager{debug} + { + 
pass_builder.registerModuleAnalyses(module_analysis_manager); + pass_builder.registerCGSCCAnalyses(gscc_analysis_manager); + pass_builder.registerFunctionAnalyses(function_analysis_manager); + pass_builder.registerLoopAnalyses(loop_analysis_manager); + + pass_builder.crossRegisterProxies( + loop_analysis_manager, function_analysis_manager, gscc_analysis_manager, module_analysis_manager); + } -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp b/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp index eaa2666ca4..f90ba2b635 100644 --- a/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp +++ b/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp @@ -4,39 +4,41 @@ #include "Llvm/Llvm.hpp" -namespace microsoft { -namespace quantum { - -struct LlvmAnalyser +namespace microsoft +{ +namespace quantum { - /// Constructors - /// @{ - explicit LlvmAnalyser(bool debug); - - // Default construction not allowed as this leads - // to invalid configuration of the managers. - LlvmAnalyser() = delete; - - // Copy construction prohibited due to restrictions - // on the member variables. - LlvmAnalyser(LlvmAnalyser const &) = delete; - - // Prefer move construction at all times. - LlvmAnalyser(LlvmAnalyser &&) = default; - - // Default deconstruction. - ~LlvmAnalyser() = default; - /// @} - - /// Objects used to run a set of passes - /// @{ - llvm::PassBuilder pass_builder; - llvm::LoopAnalysisManager loop_analysis_manager; - llvm::FunctionAnalysisManager function_analysis_manager; - llvm::CGSCCAnalysisManager gscc_analysis_manager; - llvm::ModuleAnalysisManager module_analysis_manager; - /// @} -}; - -} // namespace quantum -} // namespace microsoft + + struct LlvmAnalyser + { + /// Constructors + /// @{ + explicit LlvmAnalyser(bool debug); + + // Default construction not allowed as this leads + // to invalid configuration of the managers. 
+ LlvmAnalyser() = delete; + + // Copy construction prohibited due to restrictions + // on the member variables. + LlvmAnalyser(LlvmAnalyser const&) = delete; + + // Prefer move construction at all times. + LlvmAnalyser(LlvmAnalyser&&) = default; + + // Default deconstruction. + ~LlvmAnalyser() = default; + /// @} + + /// Objects used to run a set of passes + /// @{ + llvm::PassBuilder pass_builder; + llvm::LoopAnalysisManager loop_analysis_manager; + llvm::FunctionAnalysisManager function_analysis_manager; + llvm::CGSCCAnalysisManager gscc_analysis_manager; + llvm::ModuleAnalysisManager module_analysis_manager; + /// @} + }; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index 994fff212b..20a3ba6079 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -4,10 +4,11 @@ #include "Apps/Qat/LlvmAnalysis.hpp" #include "Commandline/ParameterParser.hpp" #include "Commandline/Settings.hpp" -#include "Llvm/Llvm.hpp" #include "Profiles/BaseProfile.hpp" #include "Profiles/IProfile.hpp" +#include "Llvm/Llvm.hpp" + #include #include #include @@ -15,92 +16,92 @@ using namespace llvm; using namespace microsoft::quantum; -int main(int argc, char **argv) +int main(int argc, char** argv) { - // Parsing commmandline arguments - Settings settings{{{"debug", "false"}, - {"generate", "false"}, - {"validate", "false"}, - {"profile", "base-profile"}, - {"S", "false"}}}; - - ParameterParser parser(settings); - parser.addFlag("debug"); - parser.addFlag("generate"); - parser.addFlag("validate"); - parser.addFlag("S"); - - parser.parseArgs(argc, argv); - - if (parser.arguments().empty()) - { - std::cerr << "Usage: " << argv[0] << " [options] filename" << std::endl; - exit(-1); - } - - // Loading IR - LLVMContext context; - SMDiagnostic error; - auto module = parseIRFile(parser.getArg(0), error, context); - - if (!module) - { - std::cerr << "Invalid IR." 
<< std::endl; - exit(-1); - } - - // Extracting commandline parameters - bool debug = settings.get("debug") == "true"; - bool generate = settings.get("generate") == "true"; - bool validate = settings.get("validate") == "true"; - auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; - std::shared_ptr profile = std::make_shared(); - - // In case we debug, we also print the settings to allow provide a full - // picture of what is going. - if (debug) - { - settings.print(); - } - - // Checking if we are asked to generate a new QIR. If so, we will use - // the profile to setup passes to - if (generate) - { - // Creating pass builder - LlvmAnalyser analyser{debug}; - - // Preparing pass for generation based on profile - profile->addFunctionAnalyses(analyser.function_analysis_manager); - auto module_pass_manager = - profile->createGenerationModulePass(analyser.pass_builder, optimisation_level, debug); - - // Running the pass built by the profile - module_pass_manager.run(*module, analyser.module_analysis_manager); - - // Priniting either human readible LL code or byte - // code as a result, depending on the users preference. - if (settings.get("S") == "true") + // Parsing commmandline arguments + Settings settings{ + {{"debug", "false"}, + {"generate", "false"}, + {"validate", "false"}, + {"profile", "base-profile"}, + {"S", "false"}}}; + + ParameterParser parser(settings); + parser.addFlag("debug"); + parser.addFlag("generate"); + parser.addFlag("validate"); + parser.addFlag("S"); + + parser.parseArgs(argc, argv); + + if (parser.arguments().empty()) + { + std::cerr << "Usage: " << argv[0] << " [options] filename" << std::endl; + exit(-1); + } + + // Loading IR + LLVMContext context; + SMDiagnostic error; + auto module = parseIRFile(parser.getArg(0), error, context); + + if (!module) { - llvm::errs() << *module << "\n"; + std::cerr << "Invalid IR." 
<< std::endl; + exit(-1); } - else + + // Extracting commandline parameters + bool debug = settings.get("debug") == "true"; + bool generate = settings.get("generate") == "true"; + bool validate = settings.get("validate") == "true"; + auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; + std::shared_ptr profile = std::make_shared(); + + // In case we debug, we also print the settings to allow provide a full + // picture of what is going. + if (debug) { - llvm::errs() - << "Byte code ouput is not supported yet. Please add -S to get human readible LL code.\n"; + settings.print(); } - } - if (validate) - { - // Creating pass builder - LlvmAnalyser analyser{debug}; + // Checking if we are asked to generate a new QIR. If so, we will use + // the profile to setup passes to + if (generate) + { + // Creating pass builder + LlvmAnalyser analyser{debug}; + + // Preparing pass for generation based on profile + profile->addFunctionAnalyses(analyser.function_analysis_manager); + auto module_pass_manager = + profile->createGenerationModulePass(analyser.pass_builder, optimisation_level, debug); + + // Running the pass built by the profile + module_pass_manager.run(*module, analyser.module_analysis_manager); + + // Priniting either human readible LL code or byte + // code as a result, depending on the users preference. + if (settings.get("S") == "true") + { + llvm::errs() << *module << "\n"; + } + else + { + llvm::errs() << "Byte code ouput is not supported yet. 
Please add -S to get human readible LL code.\n"; + } + } - // Creating a validation pass manager - auto module_pass_manager = - profile->createValidationModulePass(analyser.pass_builder, optimisation_level, debug); - module_pass_manager.run(*module, analyser.module_analysis_manager); - } + if (validate) + { + // Creating pass builder + LlvmAnalyser analyser{debug}; + + // Creating a validation pass manager + auto module_pass_manager = + profile->createValidationModulePass(analyser.pass_builder, optimisation_level, debug); + module_pass_manager.run(*module, analyser.module_analysis_manager); + } - return 0; + return 0; } diff --git a/src/Passes/Source/Commandline/ParameterParser.cpp b/src/Passes/Source/Commandline/ParameterParser.cpp index b8cd011f86..73729adf10 100644 --- a/src/Passes/Source/Commandline/ParameterParser.cpp +++ b/src/Passes/Source/Commandline/ParameterParser.cpp @@ -6,92 +6,95 @@ #include #include -namespace microsoft { -namespace quantum { - -ParameterParser::ParameterParser(Settings &settings) - : settings_{settings} -{} - -void ParameterParser::parseArgs(int argc, char **argv) +namespace microsoft +{ +namespace quantum { - uint64_t i = 1; - std::vector values; - while (i < static_cast(argc)) - { - values.push_back(parseSingleArg(argv[i])); - ++i; - } - - i = 0; - while (i < values.size()) - { - auto &v = values[i]; - ++i; - - if (!v.is_key) + + ParameterParser::ParameterParser(Settings& settings) + : settings_{settings} { - arguments_.push_back(v.value); - continue; } - if (i >= values.size()) + void ParameterParser::parseArgs(int argc, char** argv) { - settings_[v.value] = "true"; - continue; + uint64_t i = 1; + std::vector values; + while (i < static_cast(argc)) + { + values.push_back(parseSingleArg(argv[i])); + ++i; + } + + i = 0; + while (i < values.size()) + { + auto& v = values[i]; + ++i; + + if (!v.is_key) + { + arguments_.push_back(v.value); + continue; + } + + if (i >= values.size()) + { + settings_[v.value] = "true"; + continue; + } + + 
auto& v2 = values[i]; + if (!v2.is_key && hasValue(v.value)) + { + settings_[v.value] = v2.value; + ++i; + continue; + } + + settings_[v.value] = "true"; + } } - auto &v2 = values[i]; - if (!v2.is_key && hasValue(v.value)) + void ParameterParser::addFlag(String const& v) { - settings_[v.value] = v2.value; - ++i; - continue; + flags_.insert(v); } - settings_[v.value] = "true"; - } -} - -void ParameterParser::addFlag(String const &v) -{ - flags_.insert(v); -} + ParameterParser::Arguments const& ParameterParser::arguments() const + { + return arguments_; + } + ParameterParser::String const& ParameterParser::getArg(uint64_t const& n) + { + return arguments_[n]; + } -ParameterParser::Arguments const &ParameterParser::arguments() const -{ - return arguments_; -} -ParameterParser::String const &ParameterParser::getArg(uint64_t const &n) -{ - return arguments_[n]; -} + ParameterParser::ParsedValue ParameterParser::parseSingleArg(String key) + { + bool is_key = false; + if (key.size() > 2 && key.substr(0, 2) == "--") + { + is_key = true; + key = key.substr(2); + } + else if (key.size() > 1 && key.substr(0, 1) == "-") + { + is_key = true; + key = key.substr(1); + } + return {is_key, key}; + } -ParameterParser::ParsedValue ParameterParser::parseSingleArg(String key) -{ - bool is_key = false; - if (key.size() > 2 && key.substr(0, 2) == "--") - { - is_key = true; - key = key.substr(2); - } - else if (key.size() > 1 && key.substr(0, 1) == "-") - { - is_key = true; - key = key.substr(1); - } - return {is_key, key}; -} - -bool ParameterParser::hasValue(String const &key) -{ - if (flags_.find(key) != flags_.end()) - { - return false; - } + bool ParameterParser::hasValue(String const& key) + { + if (flags_.find(key) != flags_.end()) + { + return false; + } - return true; -} + return true; + } -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Commandline/ParameterParser.hpp 
b/src/Passes/Source/Commandline/ParameterParser.hpp index 269b5e29a3..ba002eff0c 100644 --- a/src/Passes/Source/Commandline/ParameterParser.hpp +++ b/src/Passes/Source/Commandline/ParameterParser.hpp @@ -9,71 +9,73 @@ #include #include -namespace microsoft { -namespace quantum { - -class ParameterParser +namespace microsoft +{ +namespace quantum { -public: - using String = std::string; - using Arguments = std::vector; - using Flags = std::unordered_set; - /// Construction and deconstrution configuration - /// @{ - /// Parameter parsers requires a setting class to store - /// parameters passed. - explicit ParameterParser(Settings &settings); + class ParameterParser + { + public: + using String = std::string; + using Arguments = std::vector; + using Flags = std::unordered_set; + + /// Construction and deconstrution configuration + /// @{ + /// Parameter parsers requires a setting class to store + /// parameters passed. + explicit ParameterParser(Settings& settings); - // Allow move semantics only. No default construction - ParameterParser() = delete; - ParameterParser(ParameterParser const &other) = delete; - ParameterParser(ParameterParser &&other) = default; - ~ParameterParser() = default; - /// @} + // Allow move semantics only. No default construction + ParameterParser() = delete; + ParameterParser(ParameterParser const& other) = delete; + ParameterParser(ParameterParser&& other) = default; + ~ParameterParser() = default; + /// @} - /// Configuration - /// @{ + /// Configuration + /// @{ - /// Marks a name as a flag (as opposed to an option). - /// This ensures that no parameter is expected after - /// the flag is specified. For instance `--debug` is - /// a flag as opposed to `--log-level 3` which is an - /// option. - void addFlag(String const &v); - /// @} + /// Marks a name as a flag (as opposed to an option). + /// This ensures that no parameter is expected after + /// the flag is specified. 
For instance `--debug` is + /// a flag as opposed to `--log-level 3` which is an + /// option. + void addFlag(String const& v); + /// @} - /// Operation - /// @{ - /// Parses the command line arguments given the argc and argv - /// from the main function. - void parseArgs(int argc, char **argv); + /// Operation + /// @{ + /// Parses the command line arguments given the argc and argv + /// from the main function. + void parseArgs(int argc, char** argv); - /// Returns list of arguments without flags and/or options - /// included. - Arguments const &arguments() const; - String const & getArg(uint64_t const &n); - /// @} -private: - struct ParsedValue - { - bool is_key{false}; - String value; - }; + /// Returns list of arguments without flags and/or options + /// included. + Arguments const& arguments() const; + String const& getArg(uint64_t const& n); + /// @} + private: + struct ParsedValue + { + bool is_key{false}; + String value; + }; - /// Helper functions and variables - /// @{ - ParsedValue parseSingleArg(String key); - bool hasValue(String const &key); - Flags flags_{}; - /// @} + /// Helper functions and variables + /// @{ + ParsedValue parseSingleArg(String key); + bool hasValue(String const& key); + Flags flags_{}; + /// @} - /// Storage of parsed data - /// @{ - Settings &settings_; - Arguments arguments_{}; - /// @} -}; + /// Storage of parsed data + /// @{ + Settings& settings_; + Arguments arguments_{}; + /// @} + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Commandline/Settings.hpp b/src/Passes/Source/Commandline/Settings.hpp index 4c4bc6fdd6..84aa3b2ce3 100644 --- a/src/Passes/Source/Commandline/Settings.hpp +++ b/src/Passes/Source/Commandline/Settings.hpp @@ -1,63 +1,68 @@ #pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
#include #include #include #include -namespace microsoft { -namespace quantum { - -class Settings +namespace microsoft +{ +namespace quantum { -public: - using String = std::string; - - using SettingsMap = std::unordered_map; - Settings(SettingsMap default_settings) - : settings_{default_settings} - {} - - String get(String const &name, String const &default_value) - { - auto it = settings_.find(name); - if (it == settings_.end()) + + class Settings { - return default_value; - } + public: + using String = std::string; - return it->second; - } + using SettingsMap = std::unordered_map; + Settings(SettingsMap default_settings) + : settings_{default_settings} + { + } - String get(String const &name) - { - auto it = settings_.find(name); - if (it == settings_.end()) - { - throw std::runtime_error("Could not find setting '" + name + "'."); - } + String get(String const& name, String const& default_value) + { + auto it = settings_.find(name); + if (it == settings_.end()) + { + return default_value; + } - return it->second; - } + return it->second; + } - void print() - { - std::cout << "Settings" << std::endl; - for (auto &s : settings_) - { - std::cout << std::setw(20) << s.first << ": " << s.second << std::endl; - } - } - - String &operator[](String const &key) - { - return settings_[key]; - } - -private: - SettingsMap settings_; - friend class ParameterParser; -}; - -} // namespace quantum -} // namespace microsoft + String get(String const& name) + { + auto it = settings_.find(name); + if (it == settings_.end()) + { + throw std::runtime_error("Could not find setting '" + name + "'."); + } + + return it->second; + } + + void print() + { + std::cout << "Settings" << std::endl; + for (auto& s : settings_) + { + std::cout << std::setw(20) << s.first << ": " << s.second << std::endl; + } + } + + String& operator[](String const& key) + { + return settings_[key]; + } + + private: + SettingsMap settings_; + friend class ParameterParser; + }; + +} // namespace quantum +} // 
namespace microsoft diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp index a4b4b484e5..45b97a3aee 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -8,169 +8,173 @@ #include #include -namespace microsoft { -namespace quantum { -llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) +namespace microsoft { - // Pass body - for (auto &basic_block : function) - { - // Keeping track of instructions to remove in each block - std::vector to_remove; - - for (auto &instruction : basic_block) +namespace quantum +{ + llvm::PreservedAnalyses ExpandStaticAllocationPass::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& fam) { - // Finding calls - auto *call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - continue; - } - - ConstantArguments argument_constants{}; - std::vector remaining_arguments{}; - - auto callee_function = call_instr->getCalledFunction(); - auto &use_quantum = fam.getResult(*callee_function); - - if (use_quantum.value) - { - uint32_t idx = 0; - auto n = static_cast(callee_function->arg_size()); - - // Finding argument constants - while (idx < n) + // Pass body + for (auto& basic_block : function) { - auto arg = callee_function->getArg(idx); - auto value = call_instr->getArgOperand(idx); - - auto cst = llvm::dyn_cast(value); - if (cst != nullptr) - { - argument_constants[arg->getName().str()] = cst; - } - else - { - remaining_arguments.push_back(idx); - } - - ++idx; + // Keeping track of instructions to remove in each block + std::vector to_remove; + + for (auto& instruction : basic_block) + { + // Finding calls + auto* call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + continue; + } + + 
ConstantArguments argument_constants{}; + std::vector remaining_arguments{}; + + auto callee_function = call_instr->getCalledFunction(); + auto& use_quantum = fam.getResult(*callee_function); + + if (use_quantum.value) + { + uint32_t idx = 0; + auto n = static_cast(callee_function->arg_size()); + + // Finding argument constants + while (idx < n) + { + auto arg = callee_function->getArg(idx); + auto value = call_instr->getArgOperand(idx); + + auto cst = llvm::dyn_cast(value); + if (cst != nullptr) + { + argument_constants[arg->getName().str()] = cst; + } + else + { + remaining_arguments.push_back(idx); + } + + ++idx; + } + + // Checking which arrays are constant for this + auto new_callee = expandFunctionCall(*callee_function, argument_constants); + + // Replacing call if a new function was created + if (new_callee != nullptr) + { + llvm::IRBuilder<> builder(call_instr); + + // List with new call arguments + std::vector new_arguments; + for (auto const& i : remaining_arguments) + { + // Getting the i'th argument + llvm::Value* arg = call_instr->getArgOperand(i); + + // Adding arguments that were not constant + if (argument_constants.find(arg->getName().str()) == argument_constants.end()) + { + new_arguments.push_back(arg); + } + } + + // Creating a new call + llvm::Value* new_call = builder.CreateCall(new_callee, new_arguments); + new_call->takeName(call_instr); + + // Replace all calls to old function with calls to new function + instruction.replaceAllUsesWith(new_call); + + // Schedule original instruction for deletion + to_remove.push_back(&instruction); + } + } + } + + // Removing instructions + for (auto& instruction : to_remove) + { + if (!instruction->use_empty()) + { + instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); + } + instruction->eraseFromParent(); + } } - // Checking which arrays are constant for this - auto new_callee = expandFunctionCall(*callee_function, argument_constants); + return llvm::PreservedAnalyses::none(); + 
} - // Replacing call if a new function was created - if (new_callee != nullptr) + llvm::Function* ExpandStaticAllocationPass::expandFunctionCall( + llvm::Function& callee, + ConstantArguments const& const_args) + { + auto module = callee.getParent(); + auto& context = module->getContext(); + llvm::IRBuilder<> builder(context); + + // Copying the original function + llvm::ValueToValueMapTy remapper; + std::vector arg_types; + + // The user might be deleting arguments to the function by specifying them in + // the VMap. If so, we need to not add the arguments to the arg ty vector + // + for (auto const& arg : callee.args()) { - llvm::IRBuilder<> builder(call_instr); - - // List with new call arguments - std::vector new_arguments; - for (auto const &i : remaining_arguments) - { - // Getting the i'th argument - llvm::Value *arg = call_instr->getArgOperand(i); + // Skipping constant arguments - // Adding arguments that were not constant - if (argument_constants.find(arg->getName().str()) == argument_constants.end()) + if (const_args.find(arg.getName().str()) != const_args.end()) { - new_arguments.push_back(arg); + continue; } - } - - // Creating a new call - llvm::Value *new_call = builder.CreateCall(new_callee, new_arguments); - new_call->takeName(call_instr); - - // Replace all calls to old function with calls to new function - instruction.replaceAllUsesWith(new_call); - // Schedule original instruction for deletion - to_remove.push_back(&instruction); + arg_types.push_back(arg.getType()); } - } - } - // Removing instructions - for (auto &instruction : to_remove) - { - if (!instruction->use_empty()) - { - instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); - } - instruction->eraseFromParent(); - } - } + // Creating a new function + llvm::FunctionType* function_type = llvm::FunctionType::get( + callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); + auto function = llvm::Function::Create( + 
function_type, callee.getLinkage(), callee.getAddressSpace(), callee.getName(), module); - return llvm::PreservedAnalyses::none(); -} + // Copying the non-const arguments + auto dest_args_it = function->arg_begin(); -llvm::Function *ExpandStaticAllocationPass::expandFunctionCall(llvm::Function & callee, - ConstantArguments const &const_args) -{ - auto module = callee.getParent(); - auto & context = module->getContext(); - llvm::IRBuilder<> builder(context); - - // Copying the original function - llvm::ValueToValueMapTy remapper; - std::vector arg_types; - - // The user might be deleting arguments to the function by specifying them in - // the VMap. If so, we need to not add the arguments to the arg ty vector - // - for (auto const &arg : callee.args()) - { - // Skipping constant arguments - - if (const_args.find(arg.getName().str()) != const_args.end()) - { - continue; - } + for (auto const& arg : callee.args()) + { + auto const_it = const_args.find(arg.getName().str()); + if (const_it == const_args.end()) + { + // Mapping remaining function arguments + dest_args_it->setName(arg.getName()); + remapper[&arg] = &*dest_args_it++; + } + else + { + remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); + } + } - arg_types.push_back(arg.getType()); - } + llvm::SmallVector returns; // Ignore returns cloned. 
- // Creating a new function - llvm::FunctionType *function_type = llvm::FunctionType::get( - callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); - auto function = llvm::Function::Create(function_type, callee.getLinkage(), - callee.getAddressSpace(), callee.getName(), module); + // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' + llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); - // Copying the non-const arguments - auto dest_args_it = function->arg_begin(); + verifyFunction(*function); - for (auto const &arg : callee.args()) - { - auto const_it = const_args.find(arg.getName().str()); - if (const_it == const_args.end()) - { - // Mapping remaining function arguments - dest_args_it->setName(arg.getName()); - remapper[&arg] = &*dest_args_it++; + return function; } - else + + bool ExpandStaticAllocationPass::isRequired() { - remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); + return true; } - } - - llvm::SmallVector returns; // Ignore returns cloned. - - // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' - llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); - - verifyFunction(*function); - - return function; -} - -bool ExpandStaticAllocationPass::isRequired() -{ - return true; -} -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp index b4d34e98a1..dcad921310 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -2,44 +2,47 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" -#include +#include "Llvm/Llvm.hpp" -namespace microsoft { -namespace quantum { +#include -class ExpandStaticAllocationPass : public llvm::PassInfoMixin +namespace microsoft +{ +namespace quantum { -public: - using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; - using ConstantArguments = std::unordered_map; - - /// Constructors and destructors - /// @{ - ExpandStaticAllocationPass() = default; - ExpandStaticAllocationPass(ExpandStaticAllocationPass const &) = default; - ExpandStaticAllocationPass(ExpandStaticAllocationPass &&) = default; - ~ExpandStaticAllocationPass() = default; - /// @} - - /// Operators - /// @{ - ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass const &) = default; - ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass &&) = default; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} - - /// @{ - llvm::Function *expandFunctionCall(llvm::Function &callee, ConstantArguments const &const_args); - /// @} -}; - -} // namespace quantum -} // namespace microsoft + + class ExpandStaticAllocationPass : public llvm::PassInfoMixin + { + public: + using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; + using ConstantArguments = std::unordered_map; + + /// Constructors and destructors + /// @{ + ExpandStaticAllocationPass() = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass const&) = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass&&) = default; + ~ExpandStaticAllocationPass() = default; + /// @} + + /// Operators + /// @{ + ExpandStaticAllocationPass& operator=(ExpandStaticAllocationPass const&) = default; + ExpandStaticAllocationPass& operator=(ExpandStaticAllocationPass&&) = default; + /// @} + + /// Functions required by LLVM + /// @{ 
+ llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + + /// @{ + llvm::Function* expandFunctionCall(llvm::Function& callee, ConstantArguments const& const_args); + /// @} + }; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp index b4225f154a..56c72d6b03 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/LibExpandStaticAllocation.cpp @@ -1,37 +1,38 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm/Llvm.hpp" #include "Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace { +namespace +{ llvm::PassPluginLibraryInfo getExpandStaticAllocationPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; + using namespace microsoft::quantum; + using namespace llvm; - return { - LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the pass - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "expand-static-allocation") - { - fpm.addPass(ExpandStaticAllocationPass()); - return true; - } + return {LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, [](PassBuilder& pb) { + // Registering the pass + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { + if (name == "expand-static-allocation") + { + fpm.addPass(ExpandStaticAllocationPass()); + return true; + } - return false; - }); - }}; + return false; + }); + }}; } -} // namespace +} // namespace // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK 
::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getExpandStaticAllocationPluginInfo(); + return getExpandStaticAllocationPluginInfo(); } diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp index 3cd8afbd9e..bb9f5529c9 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp @@ -1,47 +1,48 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm/Llvm.hpp" #include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace { +namespace +{ // Interface to plugin llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering a printer for the anaylsis - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "print") - { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); - }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); - }); - }}; + using namespace microsoft::quantum; + using namespace llvm; + + return {LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder& pb) { + // Registering a printer for the anaylsis + pb.registerPipelineParsingCallback( + 
[](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { + if (name == "print") + { + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) { + fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); + }); + }}; } -} // namespace +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getQubitAllocationAnalysisPluginInfo(); + return getQubitAllocationAnalysisPluginInfo(); } diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 04a1e332ec..70fc168a7f 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -9,76 +9,81 @@ #include #include -namespace microsoft { -namespace quantum { - -QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( - llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) +namespace microsoft +{ +namespace quantum { - for (auto &basic_block : function) - { - for (auto &instr : basic_block) + + QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) { - auto call_instr = llvm::dyn_cast(&instr); - if (call_instr == nullptr) - { - continue; - } - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); - // llvm::errs() << "Testing " << name << " : " << *call_instr << "\n"; - - if (name == 
"__quantum__rt__qubit_allocate") - { - return {true}; - } - - if (name == "__quantum__rt__qubit_allocate_array") - { - return {true}; - } - - if (name == "__quantum__qis__m__body") - { - return {true}; - } - - if (name == "__quantum__qis__z__body") - { - return {true}; - } + for (auto& basic_block : function) + { + for (auto& instr : basic_block) + { + auto call_instr = llvm::dyn_cast(&instr); + if (call_instr == nullptr) + { + continue; + } + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + // llvm::errs() << "Testing " << name << " : " << *call_instr << "\n"; + + if (name == "__quantum__rt__qubit_allocate") + { + return {true}; + } + + if (name == "__quantum__rt__qubit_allocate_array") + { + return {true}; + } + + if (name == "__quantum__qis__m__body") + { + return {true}; + } + + if (name == "__quantum__qis__z__body") + { + return {true}; + } + } + } + + return {false}; } - } - return {false}; -} + QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) + { + } -QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream) - : out_stream_(out_stream) -{} + llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& fam) + { + auto& result = fam.getResult(function); + + if (result.value) + { + out_stream_ << function.getName() << " contains quantum allocations.\n"; + } + else + { + out_stream_ << function.getName() << " is logic only.\n"; + } + return llvm::PreservedAnalyses::all(); + } -llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) -{ - auto &result = fam.getResult(function); - - if (result.value) - { - out_stream_ << function.getName() << " contains quantum allocations.\n"; - } - else - { - out_stream_ << function.getName() << " is logic only.\n"; - } - return 
llvm::PreservedAnalyses::all(); -} - -bool QubitAllocationAnalysisPrinter::isRequired() -{ - return true; -} + bool QubitAllocationAnalysisPrinter::isRequired() + { + return true; + } -llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; + llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index 3b8289bf83..58203d5d11 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp +++ b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -7,70 +7,71 @@ #include #include -namespace microsoft { -namespace quantum { - -class QubitAllocationAnalysisAnalytics - : public llvm::AnalysisInfoMixin +namespace microsoft +{ +namespace quantum { -public: - using String = std::string; - struct Result - { - bool value{false}; - }; + class QubitAllocationAnalysisAnalytics : public llvm::AnalysisInfoMixin + { + public: + using String = std::string; - /// Constructors and destructors - /// @{ - QubitAllocationAnalysisAnalytics() = default; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const &) = delete; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics &&) = default; - ~QubitAllocationAnalysisAnalytics() = default; - /// @} + struct Result + { + bool value{false}; + }; - /// Operators - /// @{ - QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics const &) = delete; - QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics &&) = delete; - /// @} + /// Constructors and destructors + /// @{ + QubitAllocationAnalysisAnalytics() = default; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const&) = delete; + 
QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics&&) = default; + ~QubitAllocationAnalysisAnalytics() = default; + /// @} - /// Functions required by LLVM - /// @{ - Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); - /// @} + /// Operators + /// @{ + QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics const&) = delete; + QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics&&) = delete; + /// @} -private: - static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; -}; + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} -class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin -{ -public: - /// Constructors and destructors - /// @{ - explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream); - QubitAllocationAnalysisPrinter() = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const &) = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter &&) = default; - ~QubitAllocationAnalysisPrinter() = default; - /// @} + private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; + }; + + class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream); + QubitAllocationAnalysisPrinter() = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const&) = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter&&) = default; + ~QubitAllocationAnalysisPrinter() = default; + /// @} - /// Operators - /// @{ - QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter const &) = delete; - QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter &&) = delete; - /// @} + /// Operators + /// @{ + 
QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter const&) = delete; + QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter&&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} -private: - llvm::raw_ostream &out_stream_; -}; + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + private: + llvm::raw_ostream& out_stream_; + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp b/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp index 29a7a8e94f..3e53131e7a 100644 --- a/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp +++ b/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp @@ -1,37 +1,38 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "ResourceRemapper/ResourceRemapper.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace { +namespace +{ llvm::PassPluginLibraryInfo getResourceRemapperPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; + using namespace microsoft::quantum; + using namespace llvm; - return { - LLVM_PLUGIN_API_VERSION, "ResourceRemapper", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the pass - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "resource-remapper") - { - fpm.addPass(ResourceRemapperPass()); - return true; - } + return {LLVM_PLUGIN_API_VERSION, "ResourceRemapper", LLVM_VERSION_STRING, [](PassBuilder& pb) { + // Registering the pass + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { + if (name == "resource-remapper") + { + fpm.addPass(ResourceRemapperPass()); + return true; + } - return false; - }); - }}; + return false; + }); + }}; } -} // namespace +} // namespace // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getResourceRemapperPluginInfo(); + return getResourceRemapperPluginInfo(); } diff --git a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp index 5bc5b44207..060e11f43b 100644 --- a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp +++ b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp @@ -8,22 +8,23 @@ #include #include -namespace microsoft { -namespace quantum { -llvm::PreservedAnalyses ResourceRemapperPass::run(llvm::Function &function, - llvm::FunctionAnalysisManager & /*fam*/) +namespace microsoft { - // Pass body +namespace quantum +{ + llvm::PreservedAnalyses ResourceRemapperPass::run(llvm::Function& function, llvm::FunctionAnalysisManager& /*fam*/) + { + // Pass 
body - llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; + llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; - return llvm::PreservedAnalyses::all(); -} + return llvm::PreservedAnalyses::all(); + } -bool ResourceRemapperPass::isRequired() -{ - return true; -} + bool ResourceRemapperPass::isRequired() + { + return true; + } -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp index 52b8076522..03c6e564cc 100644 --- a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp +++ b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp @@ -4,32 +4,34 @@ #include "Llvm/Llvm.hpp" -namespace microsoft { -namespace quantum { - -class ResourceRemapperPass : public llvm::PassInfoMixin +namespace microsoft +{ +namespace quantum { -public: - /// Constructors and destructors - /// @{ - ResourceRemapperPass() = default; - ResourceRemapperPass(ResourceRemapperPass const &) = default; - ResourceRemapperPass(ResourceRemapperPass &&) = default; - ~ResourceRemapperPass() = default; - /// @} - /// Operators - /// @{ - ResourceRemapperPass &operator=(ResourceRemapperPass const &) = default; - ResourceRemapperPass &operator=(ResourceRemapperPass &&) = default; - /// @} + class ResourceRemapperPass : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + ResourceRemapperPass() = default; + ResourceRemapperPass(ResourceRemapperPass const&) = default; + ResourceRemapperPass(ResourceRemapperPass&&) = default; + ~ResourceRemapperPass() = default; + /// @} + + /// Operators + /// @{ + ResourceRemapperPass& operator=(ResourceRemapperPass const&) = default; + ResourceRemapperPass& operator=(ResourceRemapperPass&&) = default; + /// @} - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses 
run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} -}; + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp index f1e9cf2ddc..55fc5426ad 100644 --- a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp @@ -1,55 +1,56 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm/Llvm.hpp" #include "Passes/TransformationRule/TransformationRule.hpp" #include "Rules/Factory.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace { +namespace +{ llvm::PassPluginLibraryInfo getTransformationRulePluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "TransformationRule", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the pass - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - // Base profile - if (name == "restrict-qir") - { - RuleSet rule_set; - - // Defining the mapping - auto factory = RuleFactory(rule_set); - - factory.useStaticQubitArrayAllocation(); - factory.useStaticQubitAllocation(); - factory.useStaticResultAllocation(); - - factory.optimiseBranchQuatumOne(); - // factory.optimiseBranchQuatumZero(); - - factory.disableReferenceCounting(); - factory.disableAliasCounting(); - factory.disableStringSupport(); - - fpm.addPass(TransformationRulePass(std::move(rule_set))); - return true; - } - - return false; - }); - }}; + using namespace microsoft::quantum; + using namespace llvm; + + 
return {LLVM_PLUGIN_API_VERSION, "TransformationRule", LLVM_VERSION_STRING, [](PassBuilder& pb) { + // Registering the pass + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { + // Base profile + if (name == "restrict-qir") + { + RuleSet rule_set; + + // Defining the mapping + auto factory = RuleFactory(rule_set); + + factory.useStaticQubitArrayAllocation(); + factory.useStaticQubitAllocation(); + factory.useStaticResultAllocation(); + + factory.optimiseBranchQuatumOne(); + // factory.optimiseBranchQuatumZero(); + + factory.disableReferenceCounting(); + factory.disableAliasCounting(); + factory.disableStringSupport(); + + fpm.addPass(TransformationRulePass(std::move(rule_set))); + return true; + } + + return false; + }); + }}; } -} // namespace +} // namespace // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getTransformationRulePluginInfo(); + return getTransformationRulePluginInfo(); } diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp index 574cade548..8fa45abb59 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp @@ -8,84 +8,88 @@ #include #include -namespace microsoft { -namespace quantum { -TransformationRulePass::TransformationRulePass(RuleSet &&rule_set) - : rule_set_{std::move(rule_set)} -{} - -llvm::PreservedAnalyses TransformationRulePass::run(llvm::Function &function, - llvm::FunctionAnalysisManager & /*fam*/) +namespace microsoft { - replacements_.clear(); - - // For every instruction in every block, we attempt a match - // and replace. 
- for (auto &basic_block : function) - { - for (auto &instr : basic_block) +namespace quantum +{ + TransformationRulePass::TransformationRulePass(RuleSet&& rule_set) + : rule_set_{std::move(rule_set)} { - rule_set_.matchAndReplace(&instr, replacements_); } - } - // Applying all replacements - for (auto it = replacements_.rbegin(); it != replacements_.rend(); ++it) - { - auto instr1 = llvm::dyn_cast(it->first); - if (instr1 == nullptr) + llvm::PreservedAnalyses TransformationRulePass::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*fam*/) { - llvm::errs() << "; WARNING: cannot deal with non-instruction replacements\n"; - continue; - } + replacements_.clear(); - // Cheking if have a replacement for the instruction - if (it->second != nullptr) - { - // ... if so, we just replace it, - auto instr2 = llvm::dyn_cast(it->second); - if (instr2 == nullptr) - { - llvm::errs() << "; WARNING: cannot replace instruction with non-instruction\n"; - continue; - } - llvm::ReplaceInstWithInst(instr1, instr2); - } - else - { - // ... otherwise we delete the the instruction - // Removing all uses - if (!instr1->use_empty()) - { - instr1->replaceAllUsesWith(llvm::UndefValue::get(instr1->getType())); - } + // For every instruction in every block, we attempt a match + // and replace. + for (auto& basic_block : function) + { + for (auto& instr : basic_block) + { + rule_set_.matchAndReplace(&instr, replacements_); + } + } - // And finally we delete the instruction - instr1->eraseFromParent(); + // Applying all replacements + for (auto it = replacements_.rbegin(); it != replacements_.rend(); ++it) + { + auto instr1 = llvm::dyn_cast(it->first); + if (instr1 == nullptr) + { + llvm::errs() << "; WARNING: cannot deal with non-instruction replacements\n"; + continue; + } + + // Cheking if have a replacement for the instruction + if (it->second != nullptr) + { + // ... 
if so, we just replace it, + auto instr2 = llvm::dyn_cast(it->second); + if (instr2 == nullptr) + { + llvm::errs() << "; WARNING: cannot replace instruction with non-instruction\n"; + continue; + } + llvm::ReplaceInstWithInst(instr1, instr2); + } + else + { + // ... otherwise we delete the the instruction + // Removing all uses + if (!instr1->use_empty()) + { + instr1->replaceAllUsesWith(llvm::UndefValue::get(instr1->getType())); + } + + // And finally we delete the instruction + instr1->eraseFromParent(); + } + } + + /* + for (auto &basic_block : function) + { + llvm::errs() << "REPLACEMENTS DONE FOR:\n"; + llvm::errs() << basic_block << "\n\n"; + } + */ + // If we did not change the IR, we report that we preserved all + if (replacements_.empty()) + { + return llvm::PreservedAnalyses::all(); + } + + // ... and otherwise, we report that we preserved none. + return llvm::PreservedAnalyses::none(); } - } - /* - for (auto &basic_block : function) + bool TransformationRulePass::isRequired() { - llvm::errs() << "REPLACEMENTS DONE FOR:\n"; - llvm::errs() << basic_block << "\n\n"; + return true; } - */ - // If we did not change the IR, we report that we preserved all - if (replacements_.empty()) - { - return llvm::PreservedAnalyses::all(); - } - - // ... and otherwise, we report that we preserved none. - return llvm::PreservedAnalyses::none(); -} - -bool TransformationRulePass::isRequired() -{ - return true; -} -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp index e1e57db3f4..6646fd3a97 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp @@ -2,48 +2,51 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Rules/RuleSet.hpp" -#include +#include "Llvm/Llvm.hpp" -namespace microsoft { -namespace quantum { +#include -class TransformationRulePass : public llvm::PassInfoMixin +namespace microsoft +{ +namespace quantum { -public: - using Replacements = ReplacementRule::Replacements; - using Instruction = llvm::Instruction; - using Rules = std::vector; - using Value = llvm::Value; - using Builder = ReplacementRule::Builder; - using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; - - /// Constructors and destructors - /// @{ - TransformationRulePass(RuleSet &&rule_set); - TransformationRulePass(TransformationRulePass const &) = delete; - TransformationRulePass(TransformationRulePass &&) = default; - ~TransformationRulePass() = default; - /// @} - - /// Operators - /// @{ - TransformationRulePass &operator=(TransformationRulePass const &) = delete; - TransformationRulePass &operator=(TransformationRulePass &&) = default; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} - -private: - RuleSet rule_set_{}; - Replacements replacements_; ///< Registered replacements to be executed. 
-}; - -} // namespace quantum -} // namespace microsoft + + class TransformationRulePass : public llvm::PassInfoMixin + { + public: + using Replacements = ReplacementRule::Replacements; + using Instruction = llvm::Instruction; + using Rules = std::vector; + using Value = llvm::Value; + using Builder = ReplacementRule::Builder; + using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; + + /// Constructors and destructors + /// @{ + TransformationRulePass(RuleSet&& rule_set); + TransformationRulePass(TransformationRulePass const&) = delete; + TransformationRulePass(TransformationRulePass&&) = default; + ~TransformationRulePass() = default; + /// @} + + /// Operators + /// @{ + TransformationRulePass& operator=(TransformationRulePass const&) = delete; + TransformationRulePass& operator=(TransformationRulePass&&) = default; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + + private: + RuleSet rule_set_{}; + Replacements replacements_; ///< Registered replacements to be executed. + }; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index cfb144586d..a6d16595b8 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -1,89 +1,97 @@ -#include "Profiles/BaseProfile.hpp" +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp" #include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include "Passes/TransformationRule/TransformationRule.hpp" +#include "Profiles/BaseProfile.hpp" #include "Rules/Factory.hpp" -namespace microsoft { -namespace quantum { +#include "Llvm/Llvm.hpp" -llvm::ModulePassManager BaseProfile::createGenerationModulePass( - llvm::PassBuilder &pass_builder, llvm::PassBuilder::OptimizationLevel &optimisation_level, - bool debug) +namespace microsoft +{ +namespace quantum { - auto ret = pass_builder.buildPerModuleDefaultPipeline(optimisation_level); - // buildPerModuleDefaultPipeline buildModuleOptimizationPipeline - auto function_pass_manager = pass_builder.buildFunctionSimplificationPipeline( - optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); - auto inliner_pass = pass_builder.buildInlinerPipeline( - optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); + llvm::ModulePassManager BaseProfile::createGenerationModulePass( + llvm::PassBuilder& pass_builder, + llvm::PassBuilder::OptimizationLevel& optimisation_level, + bool debug) + { + auto ret = pass_builder.buildPerModuleDefaultPipeline(optimisation_level); + // buildPerModuleDefaultPipeline buildModuleOptimizationPipeline + auto function_pass_manager = pass_builder.buildFunctionSimplificationPipeline( + optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); - // TODO: Maybe this should be done at a module level - function_pass_manager.addPass(ExpandStaticAllocationPass()); + auto inliner_pass = + pass_builder.buildInlinerPipeline(optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); - RuleSet rule_set; + // TODO: Maybe this should be done at a module level + function_pass_manager.addPass(ExpandStaticAllocationPass()); - // Defining the mapping - auto factory = RuleFactory(rule_set); + RuleSet rule_set; - factory.useStaticQubitArrayAllocation(); - 
factory.useStaticQubitAllocation(); - factory.useStaticResultAllocation(); + // Defining the mapping + auto factory = RuleFactory(rule_set); - factory.optimiseBranchQuatumOne(); - // factory.optimiseBranchQuatumZero(); + factory.useStaticQubitArrayAllocation(); + factory.useStaticQubitAllocation(); + factory.useStaticResultAllocation(); - factory.disableReferenceCounting(); - factory.disableAliasCounting(); - factory.disableStringSupport(); + factory.optimiseBranchQuatumOne(); + // factory.optimiseBranchQuatumZero(); - function_pass_manager.addPass(TransformationRulePass(std::move(rule_set))); + factory.disableReferenceCounting(); + factory.disableAliasCounting(); + factory.disableStringSupport(); - // Eliminate dead code - function_pass_manager.addPass(llvm::DCEPass()); - function_pass_manager.addPass(llvm::ADCEPass()); + function_pass_manager.addPass(TransformationRulePass(std::move(rule_set))); - // function_pass_manager.addPass(llvm::createCalledValuePropagationPass()); - // function_pass_manager.addPass(createSIFoldOperandsPass()); + // Eliminate dead code + function_pass_manager.addPass(llvm::DCEPass()); + function_pass_manager.addPass(llvm::ADCEPass()); - // Legacy passes: - // https://llvm.org/doxygen/group__LLVMCTransformsIPO.html#ga2ebfe3e0c3cca3b457708b4784ba93ff + // function_pass_manager.addPass(llvm::createCalledValuePropagationPass()); + // function_pass_manager.addPass(createSIFoldOperandsPass()); - // https://llvm.org/docs/NewPassManager.html - // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); - // InlinerPass() + // Legacy passes: + // https://llvm.org/doxygen/group__LLVMCTransformsIPO.html#ga2ebfe3e0c3cca3b457708b4784ba93ff - // auto &cgpm = inliner_pass.getPM(); - // cgpm.addPass(llvm::ADCEPass()); + // https://llvm.org/docs/NewPassManager.html + // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); + // InlinerPass() - // 
CGPM.addPass(createCGSCCToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(LoopFooPass()))); - // CGPM.addPass(createCGSCCToFunctionPassAdaptor(FunctionFooPass())); + // auto &cgpm = inliner_pass.getPM(); + // cgpm.addPass(llvm::ADCEPass()); - ret.addPass(createModuleToFunctionPassAdaptor(std::move(function_pass_manager))); + // CGPM.addPass(createCGSCCToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(LoopFooPass()))); + // CGPM.addPass(createCGSCCToFunctionPassAdaptor(FunctionFooPass())); - // TODO: Not available in 11 ret.addPass(llvm::createModuleToCGSCCPassAdaptor(std::move(CGPM))); + ret.addPass(createModuleToFunctionPassAdaptor(std::move(function_pass_manager))); - ret.addPass(llvm::AlwaysInlinerPass()); - ret.addPass(std::move(inliner_pass)); - // ret.addPass(); - // CGSCCA pass llvm::InlinerPass() + // TODO: Not available in 11 ret.addPass(llvm::createModuleToCGSCCPassAdaptor(std::move(CGPM))); - return ret; -} + ret.addPass(llvm::AlwaysInlinerPass()); + ret.addPass(std::move(inliner_pass)); + // ret.addPass(); + // CGSCCA pass llvm::InlinerPass() -llvm::ModulePassManager BaseProfile::createValidationModulePass( - llvm::PassBuilder &, llvm::PassBuilder::OptimizationLevel &, bool) -{ - throw std::runtime_error("Validator not implmented yet"); -} + return ret; + } -void BaseProfile::addFunctionAnalyses(FunctionAnalysisManager &fam) -{ - fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); -} + llvm::ModulePassManager BaseProfile::createValidationModulePass( + llvm::PassBuilder&, + llvm::PassBuilder::OptimizationLevel&, + bool) + { + throw std::runtime_error("Validator not implmented yet"); + } + + void BaseProfile::addFunctionAnalyses(FunctionAnalysisManager& fam) + { + fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); + } -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Profiles/BaseProfile.hpp 
b/src/Passes/Source/Profiles/BaseProfile.hpp index 55a22db085..8bdd4be35f 100644 --- a/src/Passes/Source/Profiles/BaseProfile.hpp +++ b/src/Passes/Source/Profiles/BaseProfile.hpp @@ -1,21 +1,29 @@ #pragma once -#include "Llvm/Llvm.hpp" +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + #include "Profiles/IProfile.hpp" -namespace microsoft { -namespace quantum { +#include "Llvm/Llvm.hpp" -class BaseProfile : public IProfile +namespace microsoft { -public: - llvm::ModulePassManager createGenerationModulePass(PassBuilder & pass_builder, - OptimizationLevel &optimisation_level, - bool debug) override; - llvm::ModulePassManager createValidationModulePass(PassBuilder & pass_builder, - OptimizationLevel &optimisation_level, - bool debug) override; - void addFunctionAnalyses(FunctionAnalysisManager &fam) override; -}; +namespace quantum +{ + + class BaseProfile : public IProfile + { + public: + llvm::ModulePassManager createGenerationModulePass( + PassBuilder& pass_builder, + OptimizationLevel& optimisation_level, + bool debug) override; + llvm::ModulePassManager createValidationModulePass( + PassBuilder& pass_builder, + OptimizationLevel& optimisation_level, + bool debug) override; + void addFunctionAnalyses(FunctionAnalysisManager& fam) override; + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Profiles/IProfile.cpp b/src/Passes/Source/Profiles/IProfile.cpp index f9e2370ae5..01ef4d2924 100644 --- a/src/Passes/Source/Profiles/IProfile.cpp +++ b/src/Passes/Source/Profiles/IProfile.cpp @@ -1,9 +1,14 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ #include "Profiles/IProfile.hpp" -namespace microsoft { -namespace quantum { +namespace microsoft +{ +namespace quantum +{ -IProfile::~IProfile() = default; + IProfile::~IProfile() = default; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Profiles/IProfile.hpp b/src/Passes/Source/Profiles/IProfile.hpp index d832bf0ac7..30d9fe041c 100644 --- a/src/Passes/Source/Profiles/IProfile.hpp +++ b/src/Passes/Source/Profiles/IProfile.hpp @@ -1,27 +1,33 @@ #pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. #include "Llvm/Llvm.hpp" -namespace microsoft { -namespace quantum { - -class IProfile +namespace microsoft +{ +namespace quantum { -public: - using PassBuilder = llvm::PassBuilder; - using OptimizationLevel = PassBuilder::OptimizationLevel; - using FunctionAnalysisManager = llvm::FunctionAnalysisManager; - IProfile() = default; - virtual ~IProfile(); - virtual llvm::ModulePassManager createGenerationModulePass(PassBuilder & pass_builder, - OptimizationLevel &optimisation_level, - bool debug) = 0; - virtual llvm::ModulePassManager createValidationModulePass(PassBuilder & pass_builder, - OptimizationLevel &optimisation_level, - bool debug) = 0; - virtual void addFunctionAnalyses(FunctionAnalysisManager &fam) = 0; -}; + class IProfile + { + public: + using PassBuilder = llvm::PassBuilder; + using OptimizationLevel = PassBuilder::OptimizationLevel; + using FunctionAnalysisManager = llvm::FunctionAnalysisManager; + + IProfile() = default; + virtual ~IProfile(); + virtual llvm::ModulePassManager createGenerationModulePass( + PassBuilder& pass_builder, + OptimizationLevel& optimisation_level, + bool debug) = 0; + virtual llvm::ModulePassManager createValidationModulePass( + PassBuilder& pass_builder, + OptimizationLevel& optimisation_level, + bool debug) = 0; + virtual void addFunctionAnalyses(FunctionAnalysisManager& fam) = 0; + }; -} // namespace quantum 
-} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp index 635af5306c..fa58db28dd 100644 --- a/src/Passes/Source/Rules/Factory.cpp +++ b/src/Passes/Source/Rules/Factory.cpp @@ -2,325 +2,324 @@ // Licensed under the MIT License. #include "Rules/Factory.hpp" - -#include "Llvm/Llvm.hpp" #include "Rules/Notation/Notation.hpp" -namespace microsoft { -namespace quantum { -using ReplacementRulePtr = RuleFactory::ReplacementRulePtr; -using namespace microsoft::quantum::notation; - -RuleFactory::RuleFactory(RuleSet &rule_set) - : rule_set_{rule_set} - , qubit_alloc_manager_{AllocationManager::createNew()} - , result_alloc_manager_{AllocationManager::createNew()} -{} - -RuleFactory::AllocationManagerPtr RuleFactory::qubitAllocationManager() const -{ - return qubit_alloc_manager_; -} +#include "Llvm/Llvm.hpp" -RuleFactory::AllocationManagerPtr RuleFactory::resultAllocationManager() const +namespace microsoft { - return result_alloc_manager_; -} - -void RuleFactory::removeFunctionCall(String const &name) +namespace quantum { - ReplacementRule ret{CallByNameOnly(name), deleteInstruction()}; - addRule(std::move(ret)); -} + using ReplacementRulePtr = RuleFactory::ReplacementRulePtr; + using namespace microsoft::quantum::notation; -void RuleFactory::useStaticQubitArrayAllocation() -{ - // TODO(tfr): Consider using weak pointers - auto qubit_alloc_manager = qubit_alloc_manager_; - - /// Allocation - auto allocation_replacer = [qubit_alloc_manager](Builder &, Value *val, Captures &cap, - Replacements &replacements) { - auto cst = llvm::dyn_cast(cap["size"]); - if (cst == nullptr) + RuleFactory::RuleFactory(RuleSet& rule_set) + : rule_set_{rule_set} + , qubit_alloc_manager_{AllocationManager::createNew()} + , result_alloc_manager_{AllocationManager::createNew()} { - return false; } - auto llvm_size = cst->getValue(); - auto name = val->getName().str(); - 
qubit_alloc_manager->allocate(name, llvm_size.getZExtValue()); - - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }; - - addRule({Call("__quantum__rt__qubit_allocate_array", "size"_cap = _), allocation_replacer}); - - /// Array access replacement - auto access_replacer = [qubit_alloc_manager](Builder &builder, Value *val, Captures &cap, - Replacements &replacements) { - // Getting the type pointer - - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) + RuleFactory::AllocationManagerPtr RuleFactory::qubitAllocationManager() const { - return false; + return qubit_alloc_manager_; } - // Get the index and testing that it is a constant int - auto cst = llvm::dyn_cast(cap["index"]); - if (cst == nullptr) + RuleFactory::AllocationManagerPtr RuleFactory::resultAllocationManager() const { - // ... if not, we cannot perform the mapping. - return false; + return result_alloc_manager_; } - // Computing the index by getting the current index value and offseting by - // the offset at which the qubit array is allocated. - auto llvm_size = cst->getValue(); - auto offset = qubit_alloc_manager->getOffset(cap["arrayName"]->getName().str()); - - // Creating a new index APInt that is shifted by the offset of the allocation - auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); - - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. 
Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); - - // Replacing the instruction with new instruction - replacements.push_back({llvm::dyn_cast(val), instr}); - - // Deleting the getelement and cast operations - replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); - replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); - - return true; - }; - - auto get_element = - Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); - auto cast_pattern = BitCast("getElement"_cap = get_element); - auto load_pattern = Load("cast"_cap = cast_pattern); - - addRule({std::move(load_pattern), access_replacer}); - - /// Release replacement - auto deleter = deleteInstruction(); - addRule({Call("__quantum__rt__qubit_release_array", "name"_cap = _), - [qubit_alloc_manager, deleter](Builder &builder, Value *val, Captures &cap, - Replacements &rep) { - qubit_alloc_manager->release(cap["name"]->getName().str()); - return deleter(builder, val, cap, rep); - } - - }); -} - -void RuleFactory::useStaticQubitAllocation() -{ - auto qubit_alloc_manager = qubit_alloc_manager_; - auto allocation_replacer = [qubit_alloc_manager](Builder &builder, Value *val, Captures &, - Replacements &replacements) { - // Getting the type pointer - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) + void RuleFactory::removeFunctionCall(String const& name) { - return false; + ReplacementRule ret{CallByNameOnly(name), deleteInstruction()}; + addRule(std::move(ret)); } - // Computing the index by getting the current index value and offseting by - // the offset at which the qubit array is allocated. 
- auto offset = qubit_alloc_manager->allocate(); - - // Creating a new index APInt that is shifted by the offset of the allocation - // TODO(tfr): Get the bitwidth size from somewhere - auto idx = llvm::APInt(64, offset); - - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); - - // Replacing the instruction with new instruction - replacements.push_back({llvm::dyn_cast(val), instr}); - - return true; - }; - addRule({Call("__quantum__rt__qubit_allocate"), allocation_replacer}); - - // Removing release calls - removeFunctionCall("__quantum__rt__qubit_release"); -} - -void RuleFactory::useStaticResultAllocation() -{ - auto result_alloc_manager = result_alloc_manager_; - auto replace_measurement = [result_alloc_manager](Builder &builder, Value *val, Captures &cap, - Replacements &replacements) { - // Getting the type pointer - auto ptr_type = llvm::dyn_cast(val->getType()); - if (ptr_type == nullptr) + void RuleFactory::useStaticQubitArrayAllocation() { - return false; + // TODO(tfr): Consider using weak pointers + auto qubit_alloc_manager = qubit_alloc_manager_; + + /// Allocation + auto allocation_replacer = + [qubit_alloc_manager](Builder&, Value* val, Captures& cap, Replacements& replacements) { + auto cst = llvm::dyn_cast(cap["size"]); + if (cst == nullptr) + { + return false; + } + + auto llvm_size = cst->getValue(); + auto name = val->getName().str(); + qubit_alloc_manager->allocate(name, llvm_size.getZExtValue()); + + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }; + + addRule({Call("__quantum__rt__qubit_allocate_array", "size"_cap = _), allocation_replacer}); + + /// Array access replacement + auto access_replacer = 
+ [qubit_alloc_manager](Builder& builder, Value* val, Captures& cap, Replacements& replacements) { + // Getting the type pointer + + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + // Get the index and testing that it is a constant int + auto cst = llvm::dyn_cast(cap["index"]); + if (cst == nullptr) + { + // ... if not, we cannot perform the mapping. + return false; + } + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. + auto llvm_size = cst->getValue(); + auto offset = qubit_alloc_manager->getOffset(cap["arrayName"]->getName().str()); + + // Creating a new index APInt that is shifted by the offset of the allocation + auto idx = llvm::APInt(llvm_size.getBitWidth(), llvm_size.getZExtValue() + offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. 
Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the instruction with new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + // Deleting the getelement and cast operations + replacements.push_back({llvm::dyn_cast(cap["getElement"]), nullptr}); + replacements.push_back({llvm::dyn_cast(cap["cast"]), nullptr}); + + return true; + }; + + auto get_element = Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); + auto cast_pattern = BitCast("getElement"_cap = get_element); + auto load_pattern = Load("cast"_cap = cast_pattern); + + addRule({std::move(load_pattern), access_replacer}); + + /// Release replacement + auto deleter = deleteInstruction(); + addRule( + {Call("__quantum__rt__qubit_release_array", "name"_cap = _), + [qubit_alloc_manager, deleter](Builder& builder, Value* val, Captures& cap, Replacements& rep) { + qubit_alloc_manager->release(cap["name"]->getName().str()); + return deleter(builder, val, cap, rep); + } + + }); } - // Computing the index by getting the current index value and offseting by - // the offset at which the qubit array is allocated. - auto offset = result_alloc_manager->allocate(); - - // Creating a new index APInt that is shifted by the offset of the allocation - // TODO(tfr): Get the bitwidth size from somewhere - auto idx = llvm::APInt(64, offset); - - // Computing offset - auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. 
Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); - auto instr = new llvm::IntToPtrInst(new_index, ptr_type); - instr->takeName(val); - - auto module = llvm::dyn_cast(val)->getModule(); - auto function = module->getFunction("__quantum__qis__mz__body"); - - std::vector arguments; - arguments.push_back(cap["qubit"]); - arguments.push_back(instr); - - if (!function) + void RuleFactory::useStaticQubitAllocation() { - std::vector types; - for (auto &arg : arguments) - { - types.push_back(arg->getType()); - } - - auto return_type = llvm::Type::getVoidTy(val->getContext()); - - llvm::FunctionType *fnc_type = llvm::FunctionType::get(return_type, types, false); - function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, - "__quantum__qis__mz__body", module); + auto qubit_alloc_manager = qubit_alloc_manager_; + auto allocation_replacer = + [qubit_alloc_manager](Builder& builder, Value* val, Captures&, Replacements& replacements) { + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. + auto offset = qubit_alloc_manager->allocate(); + + // Creating a new index APInt that is shifted by the offset of the allocation + // TODO(tfr): Get the bitwidth size from somewhere + auto idx = llvm::APInt(64, offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. 
Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + // Replacing the instruction with new instruction + replacements.push_back({llvm::dyn_cast(val), instr}); + + return true; + }; + addRule({Call("__quantum__rt__qubit_allocate"), allocation_replacer}); + + // Removing release calls + removeFunctionCall("__quantum__rt__qubit_release"); } - // Ensuring we are inserting after the instruction being deleted - builder.SetInsertPoint(llvm::dyn_cast(val)->getNextNode()); - - builder.CreateCall(function, arguments); - - // Replacing the instruction with new instruction - // TODO: (tfr): insert instruction before and then replace, with new call - replacements.push_back({llvm::dyn_cast(val), instr}); - - return true; - }; - - addRule({Call("__quantum__qis__m__body", "qubit"_cap = _), std::move(replace_measurement)}); -} - -void RuleFactory::optimiseBranchQuatumOne() -{ - auto get_one = Call("__quantum__rt__result_get_one"); - auto replace_branch_positive = [](Builder &builder, Value *val, Captures &cap, - Replacements &replacements) { - auto result = cap["result"]; - auto cond = llvm::dyn_cast(cap["cond"]); - // Replacing result - auto module = llvm::dyn_cast(val)->getModule(); - auto function = module->getFunction("__quantum__qir__read_result"); - std::vector arguments; - arguments.push_back(result); - - if (!function) + void RuleFactory::useStaticResultAllocation() { - std::vector types; - for (auto &arg : arguments) - { - types.push_back(arg->getType()); - } - - auto return_type = llvm::Type::getInt1Ty(val->getContext()); - - llvm::FunctionType *fnc_type = llvm::FunctionType::get(return_type, types, false); - function = llvm::Function::Create(fnc_type, llvm::Function::ExternalLinkage, - "__quantum__qir__read_result", module); + auto result_alloc_manager = result_alloc_manager_; + auto replace_measurement = + [result_alloc_manager](Builder& 
builder, Value* val, Captures& cap, Replacements& replacements) { + // Getting the type pointer + auto ptr_type = llvm::dyn_cast(val->getType()); + if (ptr_type == nullptr) + { + return false; + } + + // Computing the index by getting the current index value and offseting by + // the offset at which the qubit array is allocated. + auto offset = result_alloc_manager->allocate(); + + // Creating a new index APInt that is shifted by the offset of the allocation + // TODO(tfr): Get the bitwidth size from somewhere + auto idx = llvm::APInt(64, offset); + + // Computing offset + auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); + + // TODO(tfr): Understand what the significance of the addressspace is in relation to the + // QIR. Activate by uncommenting: + // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + auto instr = new llvm::IntToPtrInst(new_index, ptr_type); + instr->takeName(val); + + auto module = llvm::dyn_cast(val)->getModule(); + auto function = module->getFunction("__quantum__qis__mz__body"); + + std::vector arguments; + arguments.push_back(cap["qubit"]); + arguments.push_back(instr); + + if (!function) + { + std::vector types; + for (auto& arg : arguments) + { + types.push_back(arg->getType()); + } + + auto return_type = llvm::Type::getVoidTy(val->getContext()); + + llvm::FunctionType* fnc_type = llvm::FunctionType::get(return_type, types, false); + function = llvm::Function::Create( + fnc_type, llvm::Function::ExternalLinkage, "__quantum__qis__mz__body", module); + } + + // Ensuring we are inserting after the instruction being deleted + builder.SetInsertPoint(llvm::dyn_cast(val)->getNextNode()); + + builder.CreateCall(function, arguments); + + // Replacing the instruction with new instruction + // TODO: (tfr): insert instruction before and then replace, with new call + replacements.push_back({llvm::dyn_cast(val), instr}); + + return true; + }; + + addRule({Call("__quantum__qis__m__body", "qubit"_cap = _), 
std::move(replace_measurement)}); } - builder.SetInsertPoint(llvm::dyn_cast(val)); - auto new_call = builder.CreateCall(function, arguments); - new_call->takeName(cond); + void RuleFactory::optimiseBranchQuatumOne() + { + auto get_one = Call("__quantum__rt__result_get_one"); + auto replace_branch_positive = [](Builder& builder, Value* val, Captures& cap, Replacements& replacements) { + auto result = cap["result"]; + auto cond = llvm::dyn_cast(cap["cond"]); + // Replacing result + auto module = llvm::dyn_cast(val)->getModule(); + auto function = module->getFunction("__quantum__qir__read_result"); + std::vector arguments; + arguments.push_back(result); + + if (!function) + { + std::vector types; + for (auto& arg : arguments) + { + types.push_back(arg->getType()); + } + + auto return_type = llvm::Type::getInt1Ty(val->getContext()); + + llvm::FunctionType* fnc_type = llvm::FunctionType::get(return_type, types, false); + function = llvm::Function::Create( + fnc_type, llvm::Function::ExternalLinkage, "__quantum__qir__read_result", module); + } + + builder.SetInsertPoint(llvm::dyn_cast(val)); + auto new_call = builder.CreateCall(function, arguments); + new_call->takeName(cond); + + for (auto& use : cond->uses()) + { + llvm::User* user = use.getUser(); + user->setOperand(use.getOperandNo(), new_call); + } + cond->replaceAllUsesWith(new_call); + + // Deleting the previous condition and function to fetch one + replacements.push_back({cond, nullptr}); + replacements.push_back({cap["one"], nullptr}); + + return false; + }; + + /* + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + br i1 %2, label %then0__1, label %continue__1 + */ + + // Variations of get_one + addRule( + {Branch("cond"_cap = Call("__quantum__rt__result_equal", "result"_cap = _, "one"_cap = get_one), _, _), + replace_branch_positive}); + + addRule( + {Branch("cond"_cap = Call("__quantum__rt__result_equal", "one"_cap = get_one, 
"result"_cap = _), _, _), + replace_branch_positive}); + } - for (auto &use : cond->uses()) + void RuleFactory::disableReferenceCounting() { - llvm::User *user = use.getUser(); - user->setOperand(use.getOperandNo(), new_call); + removeFunctionCall("__quantum__rt__array_update_reference_count"); + removeFunctionCall("__quantum__rt__string_update_reference_count"); + removeFunctionCall("__quantum__rt__result_update_reference_count"); } - cond->replaceAllUsesWith(new_call); - - // Deleting the previous condition and function to fetch one - replacements.push_back({cond, nullptr}); - replacements.push_back({cap["one"], nullptr}); - - return false; - }; - - /* - %1 = call %Result* @__quantum__rt__result_get_one() - %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) - br i1 %2, label %then0__1, label %continue__1 - */ - - // Variations of get_one - addRule({Branch("cond"_cap = - Call("__quantum__rt__result_equal", "result"_cap = _, "one"_cap = get_one), - _, _), - replace_branch_positive}); - - addRule({Branch("cond"_cap = - Call("__quantum__rt__result_equal", "one"_cap = get_one, "result"_cap = _), - _, _), - replace_branch_positive}); -} - -void RuleFactory::disableReferenceCounting() -{ - removeFunctionCall("__quantum__rt__array_update_reference_count"); - removeFunctionCall("__quantum__rt__string_update_reference_count"); - removeFunctionCall("__quantum__rt__result_update_reference_count"); -} -void RuleFactory::disableAliasCounting() -{ - removeFunctionCall("__quantum__rt__array_update_alias_count"); - removeFunctionCall("__quantum__rt__string_update_alias_count"); - removeFunctionCall("__quantum__rt__result_update_alias_count"); -} + void RuleFactory::disableAliasCounting() + { + removeFunctionCall("__quantum__rt__array_update_alias_count"); + removeFunctionCall("__quantum__rt__string_update_alias_count"); + removeFunctionCall("__quantum__rt__result_update_alias_count"); + } -void RuleFactory::disableStringSupport() -{ - 
removeFunctionCall("__quantum__rt__string_create"); - removeFunctionCall("__quantum__rt__string_release"); - removeFunctionCall("__quantum__rt__message"); -} + void RuleFactory::disableStringSupport() + { + removeFunctionCall("__quantum__rt__string_create"); + removeFunctionCall("__quantum__rt__string_release"); + removeFunctionCall("__quantum__rt__message"); + } -ReplacementRulePtr RuleFactory::addRule(ReplacementRule &&rule) -{ - auto ret = std::make_shared(std::move(rule)); + ReplacementRulePtr RuleFactory::addRule(ReplacementRule&& rule) + { + auto ret = std::make_shared(std::move(rule)); - rule_set_.addRule(ret); + rule_set_.addRule(ret); - return ret; -} + return ret; + } -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Factory.hpp b/src/Passes/Source/Rules/Factory.hpp index 00509b6148..4f7e73b469 100644 --- a/src/Passes/Source/Rules/Factory.hpp +++ b/src/Passes/Source/Rules/Factory.hpp @@ -3,87 +3,90 @@ // Licensed under the MIT License. #include "AllocationManager/AllocationManager.hpp" -#include "Llvm/Llvm.hpp" #include "Rules/ReplacementRule.hpp" #include "Rules/RuleSet.hpp" -#include +#include "Llvm/Llvm.hpp" -namespace microsoft { -namespace quantum { +#include -/// Rule factory provides a high-level methods to build a ruleset that -/// enforces certain aspects of QIR transformation. -class RuleFactory +namespace microsoft +{ +namespace quantum { -public: - using String = std::string; - using ReplacementRulePtr = std::shared_ptr; - using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; - using Replacements = ReplacementRule::Replacements; - using Captures = OperandPrototype::Captures; - using Instruction = llvm::Instruction; - using Value = llvm::Value; - using Builder = ReplacementRule::Builder; - /// Constructor configuration. Explicit construction with - /// rule set to be configured, which can be moved using move - /// semantics. No copy allowed. 
- /// @{ - explicit RuleFactory(RuleSet &rule_set); - RuleFactory() = delete; - RuleFactory(RuleFactory const &) = delete; - RuleFactory(RuleFactory &&) = default; - ~RuleFactory() = default; - /// @} + /// Rule factory provides a high-level methods to build a ruleset that + /// enforces certain aspects of QIR transformation. + class RuleFactory + { + public: + using String = std::string; + using ReplacementRulePtr = std::shared_ptr; + using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; + using Replacements = ReplacementRule::Replacements; + using Captures = OperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using Builder = ReplacementRule::Builder; + + /// Constructor configuration. Explicit construction with + /// rule set to be configured, which can be moved using move + /// semantics. No copy allowed. + /// @{ + explicit RuleFactory(RuleSet& rule_set); + RuleFactory() = delete; + RuleFactory(RuleFactory const&) = delete; + RuleFactory(RuleFactory&&) = default; + ~RuleFactory() = default; + /// @} - /// Generic rules - /// @{ - /// Removes all calls to functions with a specified name. - /// This function matches on name alone and ignores function - /// arguments. - void removeFunctionCall(String const &name); - /// @} + /// Generic rules + /// @{ + /// Removes all calls to functions with a specified name. + /// This function matches on name alone and ignores function + /// arguments. 
+ void removeFunctionCall(String const& name); + /// @} - /// Conventions - /// @{ - void useStaticQubitArrayAllocation(); - void useStaticQubitAllocation(); - void useStaticResultAllocation(); - /// @} + /// Conventions + /// @{ + void useStaticQubitArrayAllocation(); + void useStaticQubitAllocation(); + void useStaticResultAllocation(); + /// @} - /// Optimisations - /// @{ - void optimiseBranchQuatumOne(); - void optimiseBranchQuatumZero(); - /// @} + /// Optimisations + /// @{ + void optimiseBranchQuatumOne(); + void optimiseBranchQuatumZero(); + /// @} - /// Disabling by feature - /// @{ - void disableReferenceCounting(); - void disableAliasCounting(); - void disableStringSupport(); - /// @} + /// Disabling by feature + /// @{ + void disableReferenceCounting(); + void disableAliasCounting(); + void disableStringSupport(); + /// @} - /// Allocation Managers - /// @{ - AllocationManagerPtr qubitAllocationManager() const; - AllocationManagerPtr resultAllocationManager() const; - /// @} -private: - ReplacementRulePtr addRule(ReplacementRule &&rule); + /// Allocation Managers + /// @{ + AllocationManagerPtr qubitAllocationManager() const; + AllocationManagerPtr resultAllocationManager() const; + /// @} + private: + ReplacementRulePtr addRule(ReplacementRule&& rule); - /// Affected artefacts - /// @{ - RuleSet &rule_set_; ///< The ruleset we are building - /// @} + /// Affected artefacts + /// @{ + RuleSet& rule_set_; ///< The ruleset we are building + /// @} - /// Allocation managers. Allocation managers for different types - /// @{ - AllocationManagerPtr qubit_alloc_manager_{nullptr}; - AllocationManagerPtr result_alloc_manager_{nullptr}; - /// @} -}; + /// Allocation managers. 
Allocation managers for different types + /// @{ + AllocationManagerPtr qubit_alloc_manager_{nullptr}; + AllocationManagerPtr result_alloc_manager_{nullptr}; + /// @} + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/BitCast.cpp b/src/Passes/Source/Rules/Notation/BitCast.cpp index 9e854bae1e..fa2578bf0a 100644 --- a/src/Passes/Source/Rules/Notation/BitCast.cpp +++ b/src/Passes/Source/Rules/Notation/BitCast.cpp @@ -1,29 +1,33 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm/Llvm.hpp" #include "Rules/Notation/Notation.hpp" #include "Rules/Operands/Any.hpp" #include "Rules/Operands/Call.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { -namespace notation { +namespace microsoft +{ +namespace quantum +{ + namespace notation + { -using OperandPrototypePtr = std::shared_ptr; + using OperandPrototypePtr = std::shared_ptr; -OperandPrototypePtr BitCast(OperandPrototypePtr arg) -{ - auto cast_pattern = std::make_shared(); + OperandPrototypePtr BitCast(OperandPrototypePtr arg) + { + auto cast_pattern = std::make_shared(); - cast_pattern->addChild(arg); - return static_cast(cast_pattern); -} + cast_pattern->addChild(arg); + return static_cast(cast_pattern); + } -} // namespace notation -} // namespace quantum -} // namespace microsoft + } // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Branch.cpp b/src/Passes/Source/Rules/Notation/Branch.cpp index d7889f3476..ce13231fd0 100644 --- a/src/Passes/Source/Rules/Notation/Branch.cpp +++ b/src/Passes/Source/Rules/Notation/Branch.cpp @@ -1,33 +1,36 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Rules/Notation/Notation.hpp" #include "Rules/Operands/Any.hpp" #include "Rules/Operands/Call.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { -namespace notation { +namespace microsoft +{ +namespace quantum +{ + namespace notation + { -using OperandPrototypePtr = std::shared_ptr; + using OperandPrototypePtr = std::shared_ptr; -OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, - OperandPrototypePtr arg2) -{ - auto branch_pattern = std::make_shared(); + OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, OperandPrototypePtr arg2) + { + auto branch_pattern = std::make_shared(); - branch_pattern->addChild(cond); - branch_pattern->addChild(arg1); - branch_pattern->addChild(arg2); + branch_pattern->addChild(cond); + branch_pattern->addChild(arg1); + branch_pattern->addChild(arg2); - return static_cast(branch_pattern); -} + return static_cast(branch_pattern); + } -} // namespace notation -} // namespace quantum -} // namespace microsoft + } // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp b/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp index 87cf97d886..25157140df 100644 --- a/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp +++ b/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp @@ -1,27 +1,31 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Rules/Notation/Notation.hpp" #include "Rules/Operands/Any.hpp" #include "Rules/Operands/Call.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { -namespace notation { +namespace microsoft +{ +namespace quantum +{ + namespace notation + { -using OperandPrototypePtr = std::shared_ptr; + using OperandPrototypePtr = std::shared_ptr; -OperandPrototypePtr CallByNameOnly(std::string const &name) -{ - OperandPrototypePtr ret = std::make_shared(name); - return ret; -} + OperandPrototypePtr CallByNameOnly(std::string const& name) + { + OperandPrototypePtr ret = std::make_shared(name); + return ret; + } -} // namespace notation -} // namespace quantum -} // namespace microsoft + } // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Capture.cpp b/src/Passes/Source/Rules/Notation/Capture.cpp index 867c438285..7532956bb8 100644 --- a/src/Passes/Source/Rules/Notation/Capture.cpp +++ b/src/Passes/Source/Rules/Notation/Capture.cpp @@ -1,37 +1,42 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Rules/Notation/Notation.hpp" #include "Rules/Operands/Any.hpp" #include "Rules/Operands/Call.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { -namespace notation { - -using OperandPrototypePtr = std::shared_ptr; - -Capture::Capture(std::string const &name) - : name_{name} -{} - -OperandPrototypePtr Capture::operator=(OperandPrototypePtr const &other) +namespace microsoft { - auto ret = other->copy(); - ret->enableCapture(name_); - return ret; -} - -Capture operator""_cap(char const *name, std::size_t) +namespace quantum { - return Capture(name); -} - -} // namespace notation -} // namespace quantum -} // namespace microsoft + namespace notation + { + + using OperandPrototypePtr = std::shared_ptr; + + Capture::Capture(std::string const& name) + : name_{name} + { + } + + OperandPrototypePtr Capture::operator=(OperandPrototypePtr const& other) + { + auto ret = other->copy(); + ret->enableCapture(name_); + return ret; + } + + Capture operator""_cap(char const* name, std::size_t) + { + return Capture(name); + } + + } // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Load.cpp b/src/Passes/Source/Rules/Notation/Load.cpp index 1f0711677b..2512d70c82 100644 --- a/src/Passes/Source/Rules/Notation/Load.cpp +++ b/src/Passes/Source/Rules/Notation/Load.cpp @@ -1,29 +1,33 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Rules/Notation/Notation.hpp" #include "Rules/Operands/Any.hpp" #include "Rules/Operands/Call.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { -namespace notation { +namespace microsoft +{ +namespace quantum +{ + namespace notation + { -using OperandPrototypePtr = std::shared_ptr; + using OperandPrototypePtr = std::shared_ptr; -OperandPrototypePtr Load(OperandPrototypePtr arg) -{ - auto ret = std::make_shared(); + OperandPrototypePtr Load(OperandPrototypePtr arg) + { + auto ret = std::make_shared(); - ret->addChild(arg); - return static_cast(ret); -} + ret->addChild(arg); + return static_cast(ret); + } -} // namespace notation -} // namespace quantum -} // namespace microsoft + } // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Notation.cpp b/src/Passes/Source/Rules/Notation/Notation.cpp index a72f33a49e..ebb6ad1ef3 100644 --- a/src/Passes/Source/Rules/Notation/Notation.cpp +++ b/src/Passes/Source/Rules/Notation/Notation.cpp @@ -2,30 +2,36 @@ // Licensed under the MIT License. 
#include "Rules/Notation/Notation.hpp" - -#include "Llvm/Llvm.hpp" #include "Rules/Operands/Any.hpp" #include "Rules/Operands/Call.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { -namespace notation { - -std::function -deleteInstruction() +namespace microsoft +{ +namespace quantum { - return [](ReplacementRule::Builder &, ReplacementRule::Value *val, ReplacementRule::Captures &, - ReplacementRule::Replacements &replacements) { - replacements.push_back({llvm::dyn_cast(val), nullptr}); - return true; - }; -} + namespace notation + { + + std::function + deleteInstruction() + { + return [](ReplacementRule::Builder&, ReplacementRule::Value* val, ReplacementRule::Captures&, + ReplacementRule::Replacements& replacements) { + replacements.push_back({llvm::dyn_cast(val), nullptr}); + return true; + }; + } -} // namespace notation -} // namespace quantum -} // namespace microsoft + } // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Notation.hpp b/src/Passes/Source/Rules/Notation/Notation.hpp index f638099848..fa3e5ba84b 100644 --- a/src/Passes/Source/Rules/Notation/Notation.hpp +++ b/src/Passes/Source/Rules/Notation/Notation.hpp @@ -2,55 +2,60 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Rules/Notation/Call.ipp" #include "Rules/Operands/Any.hpp" #include "Rules/Operands/Call.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { -namespace notation { - -using OperandPrototypePtr = std::shared_ptr; - -class Capture +namespace microsoft +{ +namespace quantum { -public: - Capture(std::string const &name); - OperandPrototypePtr operator=(OperandPrototypePtr const &other); - -private: - std::string name_{}; -}; - -/// @{ -template -OperandPrototypePtr Call(std::string const &name, Args... args); -OperandPrototypePtr CallByNameOnly(std::string const &name); -OperandPrototypePtr BitCast(OperandPrototypePtr arg); -OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, - OperandPrototypePtr arg2); -OperandPrototypePtr Load(OperandPrototypePtr arg); -OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value); -/// @} - -/// @{ -static std::shared_ptr _ = std::make_shared(); -/// @} - -/// @{ -std::function -deleteInstruction(); - -/// @} - -Capture operator""_cap(char const *name, std::size_t); - -} // namespace notation -} // namespace quantum -} // namespace microsoft + namespace notation + { + + using OperandPrototypePtr = std::shared_ptr; + + class Capture + { + public: + Capture(std::string const& name); + OperandPrototypePtr operator=(OperandPrototypePtr const& other); + + private: + std::string name_{}; + }; + + /// @{ + template OperandPrototypePtr Call(std::string const& name, Args... 
args); + OperandPrototypePtr CallByNameOnly(std::string const& name); + OperandPrototypePtr BitCast(OperandPrototypePtr arg); + OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, OperandPrototypePtr arg2); + OperandPrototypePtr Load(OperandPrototypePtr arg); + OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value); + /// @} + + /// @{ + static std::shared_ptr _ = std::make_shared(); + /// @} + + /// @{ + std::function + deleteInstruction(); + + /// @} + + Capture operator""_cap(char const* name, std::size_t); + + } // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Notation/Store.cpp b/src/Passes/Source/Rules/Notation/Store.cpp index 2c8379274f..dab507210a 100644 --- a/src/Passes/Source/Rules/Notation/Store.cpp +++ b/src/Passes/Source/Rules/Notation/Store.cpp @@ -1,28 +1,32 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm/Llvm.hpp" #include "Rules/Notation/Notation.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { -namespace notation { +namespace microsoft +{ +namespace quantum +{ + namespace notation + { -using OperandPrototypePtr = std::shared_ptr; + using OperandPrototypePtr = std::shared_ptr; -OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value) -{ - auto ret = std::make_shared(); + OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value) + { + auto ret = std::make_shared(); - ret->addChild(target); - ret->addChild(value); - return static_cast(ret); -} + ret->addChild(target); + ret->addChild(value); + return static_cast(ret); + } -} // namespace notation -} // namespace quantum -} // namespace microsoft + } // namespace notation +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/OperandPrototype.cpp 
b/src/Passes/Source/Rules/OperandPrototype.cpp index 31d4a68194..bfb0647e05 100644 --- a/src/Passes/Source/Rules/OperandPrototype.cpp +++ b/src/Passes/Source/Rules/OperandPrototype.cpp @@ -3,89 +3,91 @@ #include "Rules/OperandPrototype.hpp" -namespace microsoft { -namespace quantum { - -OperandPrototype::~OperandPrototype() = default; -bool OperandPrototype::matchChildren(Value *value, Captures &captures) const +namespace microsoft +{ +namespace quantum { - auto user = llvm::dyn_cast(value); - if (!children_.empty()) - { - if (user == nullptr) + + OperandPrototype::~OperandPrototype() = default; + bool OperandPrototype::matchChildren(Value* value, Captures& captures) const { - return false; + auto user = llvm::dyn_cast(value); + if (!children_.empty()) + { + if (user == nullptr) + { + return false; + } + + if (user->getNumOperands() != children_.size()) + { + return false; + } + + uint64_t i = 0; + while (i < children_.size()) + { + auto v = user->getOperand(static_cast(i)); + if (!children_[i]->match(v, captures)) + { + return false; + } + ++i; + } + + // llvm::errs() << "SUCCESS MATCH: " << *value << "\n"; + + return true; + } + + // llvm::errs() << "SUCCESS MATCH: " << *value << " " << user->getNumOperands() << "\n"; + // TODO: Check other possibilities for value + + return true; } - if (user->getNumOperands() != children_.size()) + void OperandPrototype::addChild(Child const& child) { - return false; + children_.push_back(child); } - uint64_t i = 0; - while (i < children_.size()) + void OperandPrototype::enableCapture(std::string capture_name) { - auto v = user->getOperand(static_cast(i)); - if (!children_[i]->match(v, captures)) - { - return false; - } - ++i; + capture_name_ = capture_name; } - // llvm::errs() << "SUCCESS MATCH: " << *value << "\n"; - - return true; - } - - // llvm::errs() << "SUCCESS MATCH: " << *value << " " << user->getNumOperands() << "\n"; - // TODO: Check other possibilities for value - - return true; -} - -void 
OperandPrototype::addChild(Child const &child) -{ - children_.push_back(child); -} - -void OperandPrototype::enableCapture(std::string capture_name) -{ - capture_name_ = capture_name; -} - -bool OperandPrototype::fail(Value * /*value*/, Captures & /*captures*/) const -{ - return false; -} + bool OperandPrototype::fail(Value* /*value*/, Captures& /*captures*/) const + { + return false; + } -bool OperandPrototype::success(Value *value, Captures &captures) const -{ - capture(value, captures); + bool OperandPrototype::success(Value* value, Captures& captures) const + { + capture(value, captures); + + auto ret = matchChildren(value, captures); + if (!ret) + { + uncapture(value, captures); + } + return ret; + } - auto ret = matchChildren(value, captures); - if (!ret) - { - uncapture(value, captures); - } - return ret; -} + void OperandPrototype::capture(Value* value, Captures& captures) const + { + if (!capture_name_.empty()) + { + captures[capture_name_] = value; + } + } -void OperandPrototype::capture(Value *value, Captures &captures) const -{ - if (!capture_name_.empty()) - { - captures[capture_name_] = value; - } -} + void OperandPrototype::uncapture(Value* /*value*/, Captures& captures) const + { + if (!capture_name_.empty()) + { + captures.erase(captures.find(capture_name_)); + } + } -void OperandPrototype::uncapture(Value * /*value*/, Captures &captures) const -{ - if (!capture_name_.empty()) - { - captures.erase(captures.find(capture_name_)); - } -} - -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/OperandPrototype.hpp b/src/Passes/Source/Rules/OperandPrototype.hpp index baab484234..6d615370c4 100644 --- a/src/Passes/Source/Rules/OperandPrototype.hpp +++ b/src/Passes/Source/Rules/OperandPrototype.hpp @@ -7,79 +7,81 @@ #include #include -namespace microsoft { -namespace quantum { - -/// OperandPrototype describes an IR pattern and allows matching against -/// LLVMs llvm::Value 
type. -class OperandPrototype +namespace microsoft +{ +namespace quantum { -public: - using Instruction = llvm::Instruction; - using String = std::string; - using Value = llvm::Value; - using Child = std::shared_ptr; - using Children = std::vector; - using Captures = std::unordered_map; - /// Constructors and desctructors - /// @{ - OperandPrototype() = default; - virtual ~OperandPrototype(); - /// @} + /// OperandPrototype describes an IR pattern and allows matching against + /// LLVMs llvm::Value type. + class OperandPrototype + { + public: + using Instruction = llvm::Instruction; + using String = std::string; + using Value = llvm::Value; + using Child = std::shared_ptr; + using Children = std::vector; + using Captures = std::unordered_map; + + /// Constructors and desctructors + /// @{ + OperandPrototype() = default; + virtual ~OperandPrototype(); + /// @} - /// Interface functions - /// @{ - virtual bool match(Value *value, Captures &captures) const = 0; - virtual Child copy() const = 0; - /// @} + /// Interface functions + /// @{ + virtual bool match(Value* value, Captures& captures) const = 0; + virtual Child copy() const = 0; + /// @} - /// Shared functionality - /// @{ + /// Shared functionality + /// @{ - /// Adds a child to be matched against the matchees children. Children - /// are matched in order and by size. - void addChild(Child const &child); + /// Adds a child to be matched against the matchees children. Children + /// are matched in order and by size. + void addChild(Child const& child); - /// Flags that this operand should be captured. This function ensures - /// that the captured operand is given a name. The subsequent logic - /// in this class is responsible for capturing (upon match) and - /// uncapturing (upon backtrack) with specified name - void enableCapture(std::string capture_name); - /// @} -protected: - /// Function to indicate match success or failure. 
Either of these - /// must be called prior to return from an implementation of - /// OperandPrototype::match. - /// @{ - bool fail(Value *value, Captures &captures) const; - bool success(Value *value, Captures &captures) const; - /// @} + /// Flags that this operand should be captured. This function ensures + /// that the captured operand is given a name. The subsequent logic + /// in this class is responsible for capturing (upon match) and + /// uncapturing (upon backtrack) with specified name + void enableCapture(std::string capture_name); + /// @} + protected: + /// Function to indicate match success or failure. Either of these + /// must be called prior to return from an implementation of + /// OperandPrototype::match. + /// @{ + bool fail(Value* value, Captures& captures) const; + bool success(Value* value, Captures& captures) const; + /// @} - /// Helper functions for the capture logic. - /// @{ - bool matchChildren(Value *value, Captures &captures) const; - void capture(Value *value, Captures &captures) const; - void uncapture(Value *value, Captures &captures) const; - /// @} + /// Helper functions for the capture logic. + /// @{ + bool matchChildren(Value* value, Captures& captures) const; + void capture(Value* value, Captures& captures) const; + void uncapture(Value* value, Captures& captures) const; + /// @} - /// Helper functions for operation - /// @{ - /// Shallow copy of the operand to allow name change - /// of the capture - void copyPropertiesFrom(OperandPrototype const &other) - { - capture_name_ = other.capture_name_; - children_ = other.children_; - } - /// @} -private: - /// Data variables for common matching functionality - /// @{ - std::string capture_name_{""}; ///< Name to captured value. Empty means no capture. - Children children_{}; ///< Children to match aginst the values children. 
- /// @} -}; + /// Helper functions for operation + /// @{ + /// Shallow copy of the operand to allow name change + /// of the capture + void copyPropertiesFrom(OperandPrototype const& other) + { + capture_name_ = other.capture_name_; + children_ = other.children_; + } + /// @} + private: + /// Data variables for common matching functionality + /// @{ + std::string capture_name_{""}; ///< Name to captured value. Empty means no capture. + Children children_{}; ///< Children to match aginst the values children. + /// @} + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Any.cpp b/src/Passes/Source/Rules/Operands/Any.cpp index 69e8cbc40d..9da3766e78 100644 --- a/src/Passes/Source/Rules/Operands/Any.cpp +++ b/src/Passes/Source/Rules/Operands/Any.cpp @@ -1,23 +1,25 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Rules/Operands/Any.hpp" #include "Rules/OperandPrototype.hpp" +#include "Rules/Operands/Any.hpp" -namespace microsoft { -namespace quantum { - -AnyPattern::AnyPattern() = default; -AnyPattern::~AnyPattern() = default; -bool AnyPattern::match(Value *instr, Captures &captures) const +namespace microsoft { - return success(instr, captures); -} - -AnyPattern::Child AnyPattern::copy() const +namespace quantum { - return std::make_shared(); -} -} // namespace quantum -} // namespace microsoft + AnyPattern::AnyPattern() = default; + AnyPattern::~AnyPattern() = default; + bool AnyPattern::match(Value* instr, Captures& captures) const + { + return success(instr, captures); + } + + AnyPattern::Child AnyPattern::copy() const + { + return std::make_shared(); + } + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Any.hpp b/src/Passes/Source/Rules/Operands/Any.hpp index a046ee4ed0..51fe59a18e 100644 --- a/src/Passes/Source/Rules/Operands/Any.hpp +++ b/src/Passes/Source/Rules/Operands/Any.hpp 
@@ -2,23 +2,26 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm/Llvm.hpp" #include "Rules/OperandPrototype.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { - -class AnyPattern : public OperandPrototype +namespace microsoft +{ +namespace quantum { -public: - AnyPattern(); - ~AnyPattern() override; - bool match(Value *instr, Captures &captures) const override; - Child copy() const override; -}; -} // namespace quantum -} // namespace microsoft + class AnyPattern : public OperandPrototype + { + public: + AnyPattern(); + ~AnyPattern() override; + bool match(Value* instr, Captures& captures) const override; + Child copy() const override; + }; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Call.cpp b/src/Passes/Source/Rules/Operands/Call.cpp index 863b057710..20b8eda0fd 100644 --- a/src/Passes/Source/Rules/Operands/Call.cpp +++ b/src/Passes/Source/Rules/Operands/Call.cpp @@ -1,44 +1,46 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Rules/Operands/Call.hpp" - #include "Rules/OperandPrototype.hpp" +#include "Rules/Operands/Call.hpp" -namespace microsoft { -namespace quantum { - -CallPattern::CallPattern(String const &name) - : name_{name} -{} - -CallPattern::~CallPattern() = default; - -bool CallPattern::match(Value *instr, Captures &captures) const +namespace microsoft { - auto *call_instr = llvm::dyn_cast(instr); - if (call_instr == nullptr) - { - return fail(instr, captures); - } - - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); - - if (name != name_) - { - return fail(instr, captures); - } - - return success(instr, captures); -} - -CallPattern::Child CallPattern::copy() const +namespace quantum { - auto ret = std::make_shared(name_); - ret->copyPropertiesFrom(*this); - return std::move(ret); -} -} // namespace quantum -} // namespace microsoft + CallPattern::CallPattern(String const& name) + : name_{name} + { + } + + CallPattern::~CallPattern() = default; + + bool CallPattern::match(Value* instr, Captures& captures) const + { + auto* call_instr = llvm::dyn_cast(instr); + if (call_instr == nullptr) + { + return fail(instr, captures); + } + + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + + if (name != name_) + { + return fail(instr, captures); + } + + return success(instr, captures); + } + + CallPattern::Child CallPattern::copy() const + { + auto ret = std::make_shared(name_); + ret->copyPropertiesFrom(*this); + return std::move(ret); + } + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Call.hpp b/src/Passes/Source/Rules/Operands/Call.hpp index f5c5aac931..6e8d81fd2b 100644 --- a/src/Passes/Source/Rules/Operands/Call.hpp +++ b/src/Passes/Source/Rules/Operands/Call.hpp @@ -2,29 +2,32 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Rules/OperandPrototype.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { - -class CallPattern : public OperandPrototype +namespace microsoft +{ +namespace quantum { -public: - using String = std::string; - CallPattern(String const &name); - ~CallPattern() override; + class CallPattern : public OperandPrototype + { + public: + using String = std::string; + CallPattern(String const& name); + + ~CallPattern() override; - bool match(Value *instr, Captures &captures) const override; - Child copy() const override; + bool match(Value* instr, Captures& captures) const override; + Child copy() const override; -private: - String name_{}; -}; + private: + String name_{}; + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Instruction.cpp b/src/Passes/Source/Rules/Operands/Instruction.cpp index 320a1e72cf..9ee7a05331 100644 --- a/src/Passes/Source/Rules/Operands/Instruction.cpp +++ b/src/Passes/Source/Rules/Operands/Instruction.cpp @@ -1,34 +1,32 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Rules/Operands/Instruction.hpp" - #include "Rules/OperandPrototype.hpp" +#include "Rules/Operands/Instruction.hpp" -namespace microsoft { -namespace quantum { - -template -InstructionPattern::~InstructionPattern() = default; -template -bool InstructionPattern::match(Value *instr, Captures &captures) const +namespace microsoft { - auto *load_instr = llvm::dyn_cast(instr); - if (load_instr == nullptr) - { - return fail(instr, captures); - } - - return success(instr, captures); -} - -template -typename InstructionPattern::Child InstructionPattern::copy() const +namespace quantum { - auto ret = std::make_shared>(); - ret->copyPropertiesFrom(*this); - return std::move(ret); -} + + template InstructionPattern::~InstructionPattern() = default; + template bool InstructionPattern::match(Value* instr, Captures& captures) const + { + auto* load_instr = llvm::dyn_cast(instr); + if (load_instr == nullptr) + { + return fail(instr, captures); + } + + return success(instr, captures); + } + + template typename InstructionPattern::Child InstructionPattern::copy() const + { + auto ret = std::make_shared>(); + ret->copyPropertiesFrom(*this); + return std::move(ret); + } // TODO(tfr): This seems to be a bug in LLVM. 
Template instantiations in // a single translation unit is not supposed to reinstantiate across other @@ -42,11 +40,11 @@ typename InstructionPattern::Child InstructionPattern::copy() const // for more information #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wweak-template-vtables" -template class InstructionPattern; -template class InstructionPattern; -template class InstructionPattern; -template class InstructionPattern; + template class InstructionPattern; + template class InstructionPattern; + template class InstructionPattern; + template class InstructionPattern; #pragma clang diagnostic pop -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/Operands/Instruction.hpp b/src/Passes/Source/Rules/Operands/Instruction.hpp index a116ad7007..6de1349cae 100644 --- a/src/Passes/Source/Rules/Operands/Instruction.hpp +++ b/src/Passes/Source/Rules/Operands/Instruction.hpp @@ -2,29 +2,31 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Llvm/Llvm.hpp" #include "Rules/OperandPrototype.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { - -template -class InstructionPattern : public OperandPrototype +namespace microsoft +{ +namespace quantum { -public: - using OperandPrototype::OperandPrototype; - ~InstructionPattern() override; - bool match(Value *instr, Captures &captures) const override; - Child copy() const override; -}; -using StorePattern = InstructionPattern; -using LoadPattern = InstructionPattern; -using BitCastPattern = InstructionPattern; -using BranchPattern = InstructionPattern; + template class InstructionPattern : public OperandPrototype + { + public: + using OperandPrototype::OperandPrototype; + ~InstructionPattern() override; + bool match(Value* instr, Captures& captures) const override; + Child copy() const override; + }; + + using StorePattern = InstructionPattern; + using LoadPattern = InstructionPattern; + using BitCastPattern = InstructionPattern; + using BranchPattern = InstructionPattern; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/ReplacementRule.cpp b/src/Passes/Source/Rules/ReplacementRule.cpp index 76d9c881bf..81c2f10000 100644 --- a/src/Passes/Source/Rules/ReplacementRule.cpp +++ b/src/Passes/Source/Rules/ReplacementRule.cpp @@ -3,44 +3,46 @@ #include "Rules/ReplacementRule.hpp" -namespace microsoft { -namespace quantum { - -ReplacementRule::ReplacementRule(OperandPrototypePtr &&pattern, ReplaceFunction &&replacer) - : pattern_{std::move(pattern)} - , replacer_{std::move(replacer)} -{} - -void ReplacementRule::setPattern(OperandPrototypePtr &&pattern) +namespace microsoft { - pattern_ = std::move(pattern); -} - -void ReplacementRule::setReplacer(ReplaceFunction const &replacer) -{ - replacer_ = replacer; -} - -bool ReplacementRule::match(Value *value, Captures &captures) const +namespace quantum { - if (pattern_ == nullptr) - 
{ - return false; - } - - return pattern_->match(value, captures); -} - -bool ReplacementRule::replace(Builder &builder, Value *value, Captures &captures, - Replacements &replacements) const -{ - if (replacer_) - { - return replacer_(builder, value, captures, replacements); - } - - return false; -} -} // namespace quantum -} // namespace microsoft + ReplacementRule::ReplacementRule(OperandPrototypePtr&& pattern, ReplaceFunction&& replacer) + : pattern_{std::move(pattern)} + , replacer_{std::move(replacer)} + { + } + + void ReplacementRule::setPattern(OperandPrototypePtr&& pattern) + { + pattern_ = std::move(pattern); + } + + void ReplacementRule::setReplacer(ReplaceFunction const& replacer) + { + replacer_ = replacer; + } + + bool ReplacementRule::match(Value* value, Captures& captures) const + { + if (pattern_ == nullptr) + { + return false; + } + + return pattern_->match(value, captures); + } + + bool ReplacementRule::replace(Builder& builder, Value* value, Captures& captures, Replacements& replacements) const + { + if (replacer_) + { + return replacer_(builder, value, captures, replacements); + } + + return false; + } + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index dc5a8c9b5f..b0c610f2c5 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -2,65 +2,67 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm/Llvm.hpp" #include "Rules/Operands/Any.hpp" #include "Rules/Operands/Call.hpp" #include "Rules/Operands/Instruction.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { - -/// Rule that describes a pattern and how to make a replacement of the matched values. -/// The class contians a OprandPrototype which is used to test whether an LLVM IR value -/// follows a specific pattern. 
The class also holds a function pointer to logic that -/// allows replacement of the specified value. -class ReplacementRule +namespace microsoft +{ +namespace quantum { -public: - using Captures = OperandPrototype::Captures; - using Instruction = llvm::Instruction; - using Value = llvm::Value; - using OperandPrototypePtr = std::shared_ptr; - using Builder = llvm::IRBuilder<>; - using Replacements = std::vector>; - using ReplaceFunction = std::function; - /// Constructorss and destructors - /// @{ - ReplacementRule() = default; - ReplacementRule(OperandPrototypePtr &&pattern, ReplaceFunction &&replacer); - /// @} + /// Rule that describes a pattern and how to make a replacement of the matched values. + /// The class contians a OprandPrototype which is used to test whether an LLVM IR value + /// follows a specific pattern. The class also holds a function pointer to logic that + /// allows replacement of the specified value. + class ReplacementRule + { + public: + using Captures = OperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using OperandPrototypePtr = std::shared_ptr; + using Builder = llvm::IRBuilder<>; + using Replacements = std::vector>; + using ReplaceFunction = std::function; + + /// Constructorss and destructors + /// @{ + ReplacementRule() = default; + ReplacementRule(OperandPrototypePtr&& pattern, ReplaceFunction&& replacer); + /// @} - /// Rule configuration - /// @{ + /// Rule configuration + /// @{ - /// Sets the pattern describing logic to be replaced. - void setPattern(OperandPrototypePtr &&pattern); + /// Sets the pattern describing logic to be replaced. + void setPattern(OperandPrototypePtr&& pattern); - /// Sets the replacer logic which given a successful match will perform - /// a replacement on the IR. - void setReplacer(ReplaceFunction const &replacer); - /// @} + /// Sets the replacer logic which given a successful match will perform + /// a replacement on the IR. 
+ void setReplacer(ReplaceFunction const& replacer); + /// @} - /// Operation - /// @{ - /// Tests whether a given value matches the rule pattern and store captures. - /// The function returns true if the match was successful in which case captures - /// are recorded. - bool match(Value *value, Captures &captures) const; + /// Operation + /// @{ + /// Tests whether a given value matches the rule pattern and store captures. + /// The function returns true if the match was successful in which case captures + /// are recorded. + bool match(Value* value, Captures& captures) const; - /// Invokes the replacer given a matched value and its corresponding captures - // - bool replace(Builder &builder, Value *value, Captures &captures, - Replacements &replacements) const; - /// @} -private: - OperandPrototypePtr pattern_{nullptr}; - ReplaceFunction replacer_{nullptr}; -}; + /// Invokes the replacer given a matched value and its corresponding captures + // + bool replace(Builder& builder, Value* value, Captures& captures, Replacements& replacements) const; + /// @} + private: + OperandPrototypePtr pattern_{nullptr}; + ReplaceFunction replacer_{nullptr}; + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/RuleSet.cpp b/src/Passes/Source/Rules/RuleSet.cpp index e592645c7d..817864159c 100644 --- a/src/Passes/Source/Rules/RuleSet.cpp +++ b/src/Passes/Source/Rules/RuleSet.cpp @@ -1,42 +1,44 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Rules/RuleSet.hpp" - #include "AllocationManager/AllocationManager.hpp" -#include "Llvm/Llvm.hpp" #include "Rules/Factory.hpp" #include "Rules/ReplacementRule.hpp" +#include "Rules/RuleSet.hpp" + +#include "Llvm/Llvm.hpp" #include #include -namespace microsoft { -namespace quantum { - -bool RuleSet::matchAndReplace(Instruction *value, Replacements &replacements) +namespace microsoft +{ +namespace quantum { - Captures captures; - for (auto const &rule : rules_) - { - // Checking if the rule is matched and keep track of captured nodes - if (rule->match(value, captures)) + + bool RuleSet::matchAndReplace(Instruction* value, Replacements& replacements) { + Captures captures; + for (auto const& rule : rules_) + { + // Checking if the rule is matched and keep track of captured nodes + if (rule->match(value, captures)) + { - // If it is matched, we attempt to replace it - llvm::IRBuilder<> builder{value}; - if (rule->replace(builder, value, captures, replacements)) - { - return true; - } + // If it is matched, we attempt to replace it + llvm::IRBuilder<> builder{value}; + if (rule->replace(builder, value, captures, replacements)) + { + return true; + } + } + } + return false; } - } - return false; -} -void RuleSet::addRule(ReplacementRulePtr const &rule) -{ - rules_.push_back(rule); -} + void RuleSet::addRule(ReplacementRulePtr const& rule) + { + rules_.push_back(rule); + } -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Rules/RuleSet.hpp b/src/Passes/Source/Rules/RuleSet.hpp index e6226874bc..8f335ea065 100644 --- a/src/Passes/Source/Rules/RuleSet.hpp +++ b/src/Passes/Source/Rules/RuleSet.hpp @@ -3,64 +3,67 @@ // Licensed under the MIT License. 
#include "AllocationManager/AllocationManager.hpp" -#include "Llvm/Llvm.hpp" #include "Rules/OperandPrototype.hpp" #include "Rules/ReplacementRule.hpp" +#include "Llvm/Llvm.hpp" + #include #include -namespace microsoft { -namespace quantum { - -/// RuleSet contains a set of replacement rules and the corresponding logic -/// to apply the rules. The class allows one to apply the rules by which -/// each rule is tested one-by-one until a successful attempt at performing -/// a replace has happened, or the list was exhausted. -class RuleSet +namespace microsoft +{ +namespace quantum { -public: - using ReplacementRulePtr = std::shared_ptr; - using Rules = std::vector; - using Replacements = ReplacementRule::Replacements; - using Captures = OperandPrototype::Captures; - using Instruction = llvm::Instruction; - using Value = llvm::Value; - using Builder = ReplacementRule::Builder; - using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; - /// Constructors - /// @{ - RuleSet() = default; - RuleSet(RuleSet const &) = default; - RuleSet(RuleSet &&) = default; - ~RuleSet() = default; - /// @} + /// RuleSet contains a set of replacement rules and the corresponding logic + /// to apply the rules. The class allows one to apply the rules by which + /// each rule is tested one-by-one until a successful attempt at performing + /// a replace has happened, or the list was exhausted. 
+ class RuleSet + { + public: + using ReplacementRulePtr = std::shared_ptr; + using Rules = std::vector; + using Replacements = ReplacementRule::Replacements; + using Captures = OperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using Builder = ReplacementRule::Builder; + using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; + + /// Constructors + /// @{ + RuleSet() = default; + RuleSet(RuleSet const&) = default; + RuleSet(RuleSet&&) = default; + ~RuleSet() = default; + /// @} - /// Operators - /// @{ - RuleSet &operator=(RuleSet const &) = default; - RuleSet &operator=(RuleSet &&) = default; - // TODO(tfr): add RuleSet operator&(RuleSet const &other); - /// @} + /// Operators + /// @{ + RuleSet& operator=(RuleSet const&) = default; + RuleSet& operator=(RuleSet&&) = default; + // TODO(tfr): add RuleSet operator&(RuleSet const &other); + /// @} - /// Operating rule sets - /// @{ - /// Matches patterns and runs the replacement routines if a match - /// is found. The function returns true if a pattern is matched and - /// and the replacement was a success. In all other cases, it returns - /// false. - bool matchAndReplace(Instruction *value, Replacements &replacements); - /// @} + /// Operating rule sets + /// @{ + /// Matches patterns and runs the replacement routines if a match + /// is found. The function returns true if a pattern is matched and + /// and the replacement was a success. In all other cases, it returns + /// false. + bool matchAndReplace(Instruction* value, Replacements& replacements); + /// @} - /// Set up and configuration - /// @{ - /// Adds a new replacement rule to the set. - void addRule(ReplacementRulePtr const &rule); - /// @} -private: - Rules rules_; ///< Rules that describes QIR mappings -}; + /// Set up and configuration + /// @{ + /// Adds a new replacement rule to the set. 
+ void addRule(ReplacementRulePtr const& rule); + /// @} + private: + Rules rules_; ///< Rules that describes QIR mappings + }; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft From 09e112f2a27ff4c4585d23b82c41adab08050942 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Sat, 14 Aug 2021 09:23:39 +0200 Subject: [PATCH 092/106] Fixing CI and style --- src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp | 41 +++-- src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp | 22 ++- src/Passes/Source/Apps/Qat/Qat.cpp | 150 +++++++++--------- src/Passes/Source/Commandline/Settings.hpp | 4 +- .../TransformationRule/TransformationRule.hpp | 2 +- src/Passes/Source/Profiles/BaseProfile.cpp | 9 +- src/Passes/Source/Rules/Factory.cpp | 34 ++-- src/Passes/Source/Rules/Factory.hpp | 2 +- src/Passes/Source/Rules/Notation/BitCast.cpp | 6 +- src/Passes/Source/Rules/Notation/Branch.cpp | 9 +- src/Passes/Source/Rules/Notation/Call.ipp | 8 +- .../Source/Rules/Notation/CallByNameOnly.cpp | 6 +- src/Passes/Source/Rules/Notation/Capture.cpp | 4 +- src/Passes/Source/Rules/Notation/Load.cpp | 6 +- src/Passes/Source/Rules/Notation/Notation.hpp | 22 +-- src/Passes/Source/Rules/Notation/Store.cpp | 6 +- src/Passes/Source/Rules/OperandPrototype.cpp | 20 +-- src/Passes/Source/Rules/OperandPrototype.hpp | 14 +- src/Passes/Source/Rules/Operands/Any.hpp | 2 +- src/Passes/Source/Rules/Operands/Call.hpp | 4 +- .../Source/Rules/Operands/Instruction.hpp | 4 +- src/Passes/Source/Rules/ReplacementRule.cpp | 4 +- src/Passes/Source/Rules/ReplacementRule.hpp | 22 +-- src/Passes/Source/Rules/RuleSet.hpp | 2 +- 24 files changed, 226 insertions(+), 177 deletions(-) diff --git a/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp b/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp index 4a0b5b32bf..47b8bbc500 100644 --- a/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp +++ b/src/Passes/Source/Apps/Qat/LlvmAnalysis.cpp @@ -11,18 +11,39 @@ namespace quantum { LlvmAnalyser::LlvmAnalyser(bool 
debug) - : loop_analysis_manager{debug} - , function_analysis_manager{debug} - , gscc_analysis_manager{debug} - , module_analysis_manager{debug} + : loop_analysis_manager_{debug} + , function_analysis_manager_{debug} + , gscc_analysis_manager_{debug} + , module_analysis_manager_{debug} { - pass_builder.registerModuleAnalyses(module_analysis_manager); - pass_builder.registerCGSCCAnalyses(gscc_analysis_manager); - pass_builder.registerFunctionAnalyses(function_analysis_manager); - pass_builder.registerLoopAnalyses(loop_analysis_manager); + pass_builder_.registerModuleAnalyses(module_analysis_manager_); + pass_builder_.registerCGSCCAnalyses(gscc_analysis_manager_); + pass_builder_.registerFunctionAnalyses(function_analysis_manager_); + pass_builder_.registerLoopAnalyses(loop_analysis_manager_); - pass_builder.crossRegisterProxies( - loop_analysis_manager, function_analysis_manager, gscc_analysis_manager, module_analysis_manager); + pass_builder_.crossRegisterProxies( + loop_analysis_manager_, function_analysis_manager_, gscc_analysis_manager_, module_analysis_manager_); + } + + llvm::PassBuilder& LlvmAnalyser::passBuilder() + { + return pass_builder_; + } + llvm::LoopAnalysisManager& LlvmAnalyser::loopAnalysisManager() + { + return loop_analysis_manager_; + } + llvm::FunctionAnalysisManager& LlvmAnalyser::functionAnalysisManager() + { + return function_analysis_manager_; + } + llvm::CGSCCAnalysisManager& LlvmAnalyser::gsccAnalysisManager() + { + return gscc_analysis_manager_; + } + llvm::ModuleAnalysisManager& LlvmAnalyser::moduleAnalysisManager() + { + return module_analysis_manager_; } } // namespace quantum diff --git a/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp b/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp index f90ba2b635..20d1e814da 100644 --- a/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp +++ b/src/Passes/Source/Apps/Qat/LlvmAnalysis.hpp @@ -9,8 +9,9 @@ namespace microsoft namespace quantum { - struct LlvmAnalyser + class LlvmAnalyser { + public: /// 
Constructors /// @{ explicit LlvmAnalyser(bool debug); @@ -30,13 +31,22 @@ namespace quantum ~LlvmAnalyser() = default; /// @} + /// Acccess member functions + /// @{ + llvm::PassBuilder& passBuilder(); + llvm::LoopAnalysisManager& loopAnalysisManager(); + llvm::FunctionAnalysisManager& functionAnalysisManager(); + llvm::CGSCCAnalysisManager& gsccAnalysisManager(); + llvm::ModuleAnalysisManager& moduleAnalysisManager(); + /// @} + private: /// Objects used to run a set of passes /// @{ - llvm::PassBuilder pass_builder; - llvm::LoopAnalysisManager loop_analysis_manager; - llvm::FunctionAnalysisManager function_analysis_manager; - llvm::CGSCCAnalysisManager gscc_analysis_manager; - llvm::ModuleAnalysisManager module_analysis_manager; + llvm::PassBuilder pass_builder_; + llvm::LoopAnalysisManager loop_analysis_manager_; + llvm::FunctionAnalysisManager function_analysis_manager_; + llvm::CGSCCAnalysisManager gscc_analysis_manager_; + llvm::ModuleAnalysisManager module_analysis_manager_; /// @} }; diff --git a/src/Passes/Source/Apps/Qat/Qat.cpp b/src/Passes/Source/Apps/Qat/Qat.cpp index 20a3ba6079..8c29258fe1 100644 --- a/src/Passes/Source/Apps/Qat/Qat.cpp +++ b/src/Passes/Source/Apps/Qat/Qat.cpp @@ -18,89 +18,97 @@ using namespace microsoft::quantum; int main(int argc, char** argv) { - // Parsing commmandline arguments - Settings settings{ - {{"debug", "false"}, - {"generate", "false"}, - {"validate", "false"}, - {"profile", "base-profile"}, - {"S", "false"}}}; - - ParameterParser parser(settings); - parser.addFlag("debug"); - parser.addFlag("generate"); - parser.addFlag("validate"); - parser.addFlag("S"); - - parser.parseArgs(argc, argv); - - if (parser.arguments().empty()) + try { - std::cerr << "Usage: " << argv[0] << " [options] filename" << std::endl; - exit(-1); - } - - // Loading IR - LLVMContext context; - SMDiagnostic error; - auto module = parseIRFile(parser.getArg(0), error, context); - - if (!module) - { - std::cerr << "Invalid IR." 
<< std::endl; - exit(-1); - } - - // Extracting commandline parameters - bool debug = settings.get("debug") == "true"; - bool generate = settings.get("generate") == "true"; - bool validate = settings.get("validate") == "true"; - auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; - std::shared_ptr profile = std::make_shared(); + // Parsing commmandline arguments + Settings settings{ + {{"debug", "false"}, + {"generate", "false"}, + {"validate", "false"}, + {"profile", "base-profile"}, + {"S", "false"}}}; + + ParameterParser parser(settings); + parser.addFlag("debug"); + parser.addFlag("generate"); + parser.addFlag("validate"); + parser.addFlag("S"); + + parser.parseArgs(argc, argv); + + if (parser.arguments().empty()) + { + std::cerr << "Usage: " << argv[0] << " [options] filename" << std::endl; + exit(-1); + } - // In case we debug, we also print the settings to allow provide a full - // picture of what is going. - if (debug) - { - settings.print(); - } + // Loading IR + LLVMContext context; + SMDiagnostic error; + auto module = parseIRFile(parser.getArg(0), error, context); - // Checking if we are asked to generate a new QIR. If so, we will use - // the profile to setup passes to - if (generate) - { - // Creating pass builder - LlvmAnalyser analyser{debug}; + if (!module) + { + std::cerr << "Invalid IR." 
<< std::endl; + exit(-1); + } - // Preparing pass for generation based on profile - profile->addFunctionAnalyses(analyser.function_analysis_manager); - auto module_pass_manager = - profile->createGenerationModulePass(analyser.pass_builder, optimisation_level, debug); + // Extracting commandline parameters + bool debug = settings.get("debug") == "true"; + bool generate = settings.get("generate") == "true"; + bool validate = settings.get("validate") == "true"; + auto optimisation_level = llvm::PassBuilder::OptimizationLevel::O1; + std::shared_ptr profile = std::make_shared(); - // Running the pass built by the profile - module_pass_manager.run(*module, analyser.module_analysis_manager); + // In case we debug, we also print the settings to allow provide a full + // picture of what is going. + if (debug) + { + settings.print(); + } - // Priniting either human readible LL code or byte - // code as a result, depending on the users preference. - if (settings.get("S") == "true") + // Checking if we are asked to generate a new QIR. If so, we will use + // the profile to setup passes to + if (generate) { - llvm::errs() << *module << "\n"; + // Creating pass builder + LlvmAnalyser analyser{debug}; + + // Preparing pass for generation based on profile + profile->addFunctionAnalyses(analyser.functionAnalysisManager()); + auto module_pass_manager = + profile->createGenerationModulePass(analyser.passBuilder(), optimisation_level, debug); + + // Running the pass built by the profile + module_pass_manager.run(*module, analyser.moduleAnalysisManager()); + + // Priniting either human readible LL code or byte + // code as a result, depending on the users preference. + if (settings.get("S") == "true") + { + llvm::errs() << *module << "\n"; + } + else + { + llvm::errs() << "Byte code ouput is not supported yet. Please add -S to get human readible " + "LL code.\n"; + } } - else + + if (validate) { - llvm::errs() << "Byte code ouput is not supported yet. 
Please add -S to get human readible LL code.\n"; + // Creating pass builder + LlvmAnalyser analyser{debug}; + + // Creating a validation pass manager + auto module_pass_manager = + profile->createValidationModulePass(analyser.passBuilder(), optimisation_level, debug); + module_pass_manager.run(*module, analyser.moduleAnalysisManager()); } } - - if (validate) + catch (std::exception const& e) { - // Creating pass builder - LlvmAnalyser analyser{debug}; - - // Creating a validation pass manager - auto module_pass_manager = - profile->createValidationModulePass(analyser.pass_builder, optimisation_level, debug); - module_pass_manager.run(*module, analyser.module_analysis_manager); + llvm::errs() << "An error occured: " << e.what() << "\n"; } return 0; diff --git a/src/Passes/Source/Commandline/Settings.hpp b/src/Passes/Source/Commandline/Settings.hpp index 84aa3b2ce3..d5c46a79d9 100644 --- a/src/Passes/Source/Commandline/Settings.hpp +++ b/src/Passes/Source/Commandline/Settings.hpp @@ -18,8 +18,8 @@ namespace quantum using String = std::string; using SettingsMap = std::unordered_map; - Settings(SettingsMap default_settings) - : settings_{default_settings} + explicit Settings(SettingsMap default_settings) + : settings_{std::move(default_settings)} { } diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp index 6646fd3a97..831189a9dd 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp @@ -25,7 +25,7 @@ namespace quantum /// Constructors and destructors /// @{ - TransformationRulePass(RuleSet&& rule_set); + explicit TransformationRulePass(RuleSet&& rule_set); TransformationRulePass(TransformationRulePass const&) = delete; TransformationRulePass(TransformationRulePass&&) = default; ~TransformationRulePass() = default; diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp 
b/src/Passes/Source/Profiles/BaseProfile.cpp index a6d16595b8..8446224b3a 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -22,12 +22,12 @@ namespace quantum auto ret = pass_builder.buildPerModuleDefaultPipeline(optimisation_level); // buildPerModuleDefaultPipeline buildModuleOptimizationPipeline auto function_pass_manager = pass_builder.buildFunctionSimplificationPipeline( - optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); + optimisation_level, llvm::PassBuilder::ThinLTOPhase::None, debug); auto inliner_pass = - pass_builder.buildInlinerPipeline(optimisation_level, llvm::PassBuilder::ThinLTOPhase::PreLink, debug); + pass_builder.buildInlinerPipeline(optimisation_level, llvm::PassBuilder::ThinLTOPhase::None, debug); - // TODO: Maybe this should be done at a module level + // TODO(tfr): Maybe this should be done at a module level function_pass_manager.addPass(ExpandStaticAllocationPass()); RuleSet rule_set; @@ -70,7 +70,8 @@ namespace quantum ret.addPass(createModuleToFunctionPassAdaptor(std::move(function_pass_manager))); - // TODO: Not available in 11 ret.addPass(llvm::createModuleToCGSCCPassAdaptor(std::move(CGPM))); + // TODO(tfr): Not available in 11 + // ret.addPass(llvm::createModuleToCGSCCPassAdaptor(std::move(CGPM))); ret.addPass(llvm::AlwaysInlinerPass()); ret.addPass(std::move(inliner_pass)); diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp index fa58db28dd..5528fb5ffa 100644 --- a/src/Passes/Source/Rules/Factory.cpp +++ b/src/Passes/Source/Rules/Factory.cpp @@ -32,7 +32,7 @@ namespace quantum void RuleFactory::removeFunctionCall(String const& name) { - ReplacementRule ret{CallByNameOnly(name), deleteInstruction()}; + ReplacementRule ret{callByNameOnly(name), deleteInstruction()}; addRule(std::move(ret)); } @@ -58,7 +58,7 @@ namespace quantum return true; }; - addRule({Call("__quantum__rt__qubit_allocate_array", "size"_cap = _), 
allocation_replacer}); + addRule({call("__quantum__rt__qubit_allocate_array", "size"_cap = _), allocation_replacer}); /// Array access replacement auto access_replacer = @@ -106,16 +106,16 @@ namespace quantum return true; }; - auto get_element = Call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); - auto cast_pattern = BitCast("getElement"_cap = get_element); - auto load_pattern = Load("cast"_cap = cast_pattern); + auto get_element = call("__quantum__rt__array_get_element_ptr_1d", "arrayName"_cap = _, "index"_cap = _); + auto cast_pattern = bitCast("getElement"_cap = get_element); + auto load_pattern = load("cast"_cap = cast_pattern); addRule({std::move(load_pattern), access_replacer}); /// Release replacement auto deleter = deleteInstruction(); addRule( - {Call("__quantum__rt__qubit_release_array", "name"_cap = _), + {call("__quantum__rt__qubit_release_array", "name"_cap = _), [qubit_alloc_manager, deleter](Builder& builder, Value* val, Captures& cap, Replacements& rep) { qubit_alloc_manager->release(cap["name"]->getName().str()); return deleter(builder, val, cap, rep); @@ -158,7 +158,7 @@ namespace quantum return true; }; - addRule({Call("__quantum__rt__qubit_allocate"), allocation_replacer}); + addRule({call("__quantum__rt__qubit_allocate"), allocation_replacer}); // Removing release calls removeFunctionCall("__quantum__rt__qubit_release"); @@ -203,9 +203,10 @@ namespace quantum if (!function) { std::vector types; - for (auto& arg : arguments) + types.resize(arguments.size()); + for (uint64_t i = 0; i < types.size(); ++i) { - types.push_back(arg->getType()); + types[i] = arguments[i]->getType(); } auto return_type = llvm::Type::getVoidTy(val->getContext()); @@ -221,18 +222,17 @@ namespace quantum builder.CreateCall(function, arguments); // Replacing the instruction with new instruction - // TODO: (tfr): insert instruction before and then replace, with new call + // TODO(tfr): (tfr): insert instruction before and then replace, 
with new call replacements.push_back({llvm::dyn_cast(val), instr}); return true; }; - addRule({Call("__quantum__qis__m__body", "qubit"_cap = _), std::move(replace_measurement)}); + addRule({call("__quantum__qis__m__body", "qubit"_cap = _), std::move(replace_measurement)}); } void RuleFactory::optimiseBranchQuatumOne() { - auto get_one = Call("__quantum__rt__result_get_one"); auto replace_branch_positive = [](Builder& builder, Value* val, Captures& cap, Replacements& replacements) { auto result = cap["result"]; auto cond = llvm::dyn_cast(cap["cond"]); @@ -245,9 +245,10 @@ namespace quantum if (!function) { std::vector types; - for (auto& arg : arguments) + types.resize(arguments.size()); + for (uint64_t i = 0; i < types.size(); ++i) { - types.push_back(arg->getType()); + types[i] = arguments[i]->getType(); } auto return_type = llvm::Type::getInt1Ty(val->getContext()); @@ -282,12 +283,13 @@ namespace quantum */ // Variations of get_one + auto get_one = call("__quantum__rt__result_get_one"); addRule( - {Branch("cond"_cap = Call("__quantum__rt__result_equal", "result"_cap = _, "one"_cap = get_one), _, _), + {branch("cond"_cap = call("__quantum__rt__result_equal", "result"_cap = _, "one"_cap = get_one), _, _), replace_branch_positive}); addRule( - {Branch("cond"_cap = Call("__quantum__rt__result_equal", "one"_cap = get_one, "result"_cap = _), _, _), + {branch("cond"_cap = call("__quantum__rt__result_equal", "one"_cap = get_one, "result"_cap = _), _, _), replace_branch_positive}); } diff --git a/src/Passes/Source/Rules/Factory.hpp b/src/Passes/Source/Rules/Factory.hpp index 4f7e73b469..9746db3a02 100644 --- a/src/Passes/Source/Rules/Factory.hpp +++ b/src/Passes/Source/Rules/Factory.hpp @@ -24,7 +24,7 @@ namespace quantum using ReplacementRulePtr = std::shared_ptr; using AllocationManagerPtr = AllocationManager::AllocationManagerPtr; using Replacements = ReplacementRule::Replacements; - using Captures = OperandPrototype::Captures; + using Captures = 
IOperandPrototype::Captures; using Instruction = llvm::Instruction; using Value = llvm::Value; using Builder = ReplacementRule::Builder; diff --git a/src/Passes/Source/Rules/Notation/BitCast.cpp b/src/Passes/Source/Rules/Notation/BitCast.cpp index fa2578bf0a..5493b80bd8 100644 --- a/src/Passes/Source/Rules/Notation/BitCast.cpp +++ b/src/Passes/Source/Rules/Notation/BitCast.cpp @@ -18,14 +18,14 @@ namespace quantum namespace notation { - using OperandPrototypePtr = std::shared_ptr; + using IOperandPrototypePtr = std::shared_ptr; - OperandPrototypePtr BitCast(OperandPrototypePtr arg) + IOperandPrototypePtr bitCast(IOperandPrototypePtr const& arg) { auto cast_pattern = std::make_shared(); cast_pattern->addChild(arg); - return static_cast(cast_pattern); + return static_cast(cast_pattern); } } // namespace notation diff --git a/src/Passes/Source/Rules/Notation/Branch.cpp b/src/Passes/Source/Rules/Notation/Branch.cpp index ce13231fd0..bc7aecae9c 100644 --- a/src/Passes/Source/Rules/Notation/Branch.cpp +++ b/src/Passes/Source/Rules/Notation/Branch.cpp @@ -18,9 +18,12 @@ namespace quantum namespace notation { - using OperandPrototypePtr = std::shared_ptr; + using IOperandPrototypePtr = std::shared_ptr; - OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, OperandPrototypePtr arg2) + IOperandPrototypePtr branch( + IOperandPrototypePtr const& cond, + IOperandPrototypePtr const& arg1, + IOperandPrototypePtr const& arg2) { auto branch_pattern = std::make_shared(); @@ -28,7 +31,7 @@ namespace quantum branch_pattern->addChild(arg1); branch_pattern->addChild(arg2); - return static_cast(branch_pattern); + return static_cast(branch_pattern); } } // namespace notation diff --git a/src/Passes/Source/Rules/Notation/Call.ipp b/src/Passes/Source/Rules/Notation/Call.ipp index da701e49c8..1c2ade7517 100644 --- a/src/Passes/Source/Rules/Notation/Call.ipp +++ b/src/Passes/Source/Rules/Notation/Call.ipp @@ -16,13 +16,13 @@ namespace microsoft { namespace quantum { 
namespace notation { -using OperandPrototypePtr = std::shared_ptr; +using IOperandPrototypePtr = std::shared_ptr; template -OperandPrototypePtr Call(std::string const &name, Args... args) +IOperandPrototypePtr call(std::string const &name, Args... args) { - OperandPrototypePtr ret = std::make_shared(name); - std::vector arguments{args...}; + IOperandPrototypePtr ret = std::make_shared(name); + std::vector arguments{args...}; // Adding arguments to matching for (auto &a : arguments) diff --git a/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp b/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp index 25157140df..c48b64ad1b 100644 --- a/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp +++ b/src/Passes/Source/Rules/Notation/CallByNameOnly.cpp @@ -18,11 +18,11 @@ namespace quantum namespace notation { - using OperandPrototypePtr = std::shared_ptr; + using IOperandPrototypePtr = std::shared_ptr; - OperandPrototypePtr CallByNameOnly(std::string const& name) + IOperandPrototypePtr callByNameOnly(std::string const& name) { - OperandPrototypePtr ret = std::make_shared(name); + IOperandPrototypePtr ret = std::make_shared(name); return ret; } diff --git a/src/Passes/Source/Rules/Notation/Capture.cpp b/src/Passes/Source/Rules/Notation/Capture.cpp index 7532956bb8..f278c857ca 100644 --- a/src/Passes/Source/Rules/Notation/Capture.cpp +++ b/src/Passes/Source/Rules/Notation/Capture.cpp @@ -18,14 +18,14 @@ namespace quantum namespace notation { - using OperandPrototypePtr = std::shared_ptr; + using IOperandPrototypePtr = std::shared_ptr; Capture::Capture(std::string const& name) : name_{name} { } - OperandPrototypePtr Capture::operator=(OperandPrototypePtr const& other) + IOperandPrototypePtr Capture::operator=(IOperandPrototypePtr const& other) // NOLINT { auto ret = other->copy(); ret->enableCapture(name_); diff --git a/src/Passes/Source/Rules/Notation/Load.cpp b/src/Passes/Source/Rules/Notation/Load.cpp index 2512d70c82..9b91c10435 100644 --- 
a/src/Passes/Source/Rules/Notation/Load.cpp +++ b/src/Passes/Source/Rules/Notation/Load.cpp @@ -18,14 +18,14 @@ namespace quantum namespace notation { - using OperandPrototypePtr = std::shared_ptr; + using IOperandPrototypePtr = std::shared_ptr; - OperandPrototypePtr Load(OperandPrototypePtr arg) + IOperandPrototypePtr load(IOperandPrototypePtr const& arg) { auto ret = std::make_shared(); ret->addChild(arg); - return static_cast(ret); + return static_cast(ret); } } // namespace notation diff --git a/src/Passes/Source/Rules/Notation/Notation.hpp b/src/Passes/Source/Rules/Notation/Notation.hpp index fa3e5ba84b..254b4ceedb 100644 --- a/src/Passes/Source/Rules/Notation/Notation.hpp +++ b/src/Passes/Source/Rules/Notation/Notation.hpp @@ -19,25 +19,29 @@ namespace quantum namespace notation { - using OperandPrototypePtr = std::shared_ptr; + using IOperandPrototypePtr = std::shared_ptr; class Capture { public: - Capture(std::string const& name); - OperandPrototypePtr operator=(OperandPrototypePtr const& other); + explicit Capture(std::string const& name); + // Note that this operator is delibrately unconventional + IOperandPrototypePtr operator=(IOperandPrototypePtr const& other); // NOLINT private: std::string name_{}; }; /// @{ - template OperandPrototypePtr Call(std::string const& name, Args... args); - OperandPrototypePtr CallByNameOnly(std::string const& name); - OperandPrototypePtr BitCast(OperandPrototypePtr arg); - OperandPrototypePtr Branch(OperandPrototypePtr cond, OperandPrototypePtr arg1, OperandPrototypePtr arg2); - OperandPrototypePtr Load(OperandPrototypePtr arg); - OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value); + template IOperandPrototypePtr call(std::string const& name, Args... 
args); + IOperandPrototypePtr callByNameOnly(std::string const& name); + IOperandPrototypePtr bitCast(IOperandPrototypePtr const& arg); + IOperandPrototypePtr branch( + IOperandPrototypePtr const& cond, + IOperandPrototypePtr const& arg1, + IOperandPrototypePtr const& arg2); + IOperandPrototypePtr load(IOperandPrototypePtr const& arg); + IOperandPrototypePtr store(IOperandPrototypePtr const& target, IOperandPrototypePtr const& value); /// @} /// @{ diff --git a/src/Passes/Source/Rules/Notation/Store.cpp b/src/Passes/Source/Rules/Notation/Store.cpp index dab507210a..f772d9c013 100644 --- a/src/Passes/Source/Rules/Notation/Store.cpp +++ b/src/Passes/Source/Rules/Notation/Store.cpp @@ -16,15 +16,15 @@ namespace quantum namespace notation { - using OperandPrototypePtr = std::shared_ptr; + using IOperandPrototypePtr = std::shared_ptr; - OperandPrototypePtr Store(OperandPrototypePtr target, OperandPrototypePtr value) + IOperandPrototypePtr store(IOperandPrototypePtr const& target, IOperandPrototypePtr const& value) { auto ret = std::make_shared(); ret->addChild(target); ret->addChild(value); - return static_cast(ret); + return static_cast(ret); } } // namespace notation diff --git a/src/Passes/Source/Rules/OperandPrototype.cpp b/src/Passes/Source/Rules/OperandPrototype.cpp index bfb0647e05..1473c0e2aa 100644 --- a/src/Passes/Source/Rules/OperandPrototype.cpp +++ b/src/Passes/Source/Rules/OperandPrototype.cpp @@ -8,8 +8,8 @@ namespace microsoft namespace quantum { - OperandPrototype::~OperandPrototype() = default; - bool OperandPrototype::matchChildren(Value* value, Captures& captures) const + IOperandPrototype::~IOperandPrototype() = default; + bool IOperandPrototype::matchChildren(Value* value, Captures& captures) const { auto user = llvm::dyn_cast(value); if (!children_.empty()) @@ -41,27 +41,27 @@ namespace quantum } // llvm::errs() << "SUCCESS MATCH: " << *value << " " << user->getNumOperands() << "\n"; - // TODO: Check other possibilities for value + // TODO(tfr): 
Check other possibilities for value return true; } - void OperandPrototype::addChild(Child const& child) + void IOperandPrototype::addChild(Child const& child) { children_.push_back(child); } - void OperandPrototype::enableCapture(std::string capture_name) + void IOperandPrototype::enableCapture(std::string capture_name) { - capture_name_ = capture_name; + capture_name_ = std::move(capture_name); } - bool OperandPrototype::fail(Value* /*value*/, Captures& /*captures*/) const + bool IOperandPrototype::fail(Value* /*value*/, Captures& /*captures*/) const { return false; } - bool OperandPrototype::success(Value* value, Captures& captures) const + bool IOperandPrototype::success(Value* value, Captures& captures) const { capture(value, captures); @@ -73,7 +73,7 @@ namespace quantum return ret; } - void OperandPrototype::capture(Value* value, Captures& captures) const + void IOperandPrototype::capture(Value* value, Captures& captures) const { if (!capture_name_.empty()) { @@ -81,7 +81,7 @@ namespace quantum } } - void OperandPrototype::uncapture(Value* /*value*/, Captures& captures) const + void IOperandPrototype::uncapture(Value* /*value*/, Captures& captures) const { if (!capture_name_.empty()) { diff --git a/src/Passes/Source/Rules/OperandPrototype.hpp b/src/Passes/Source/Rules/OperandPrototype.hpp index 6d615370c4..5f910176fc 100644 --- a/src/Passes/Source/Rules/OperandPrototype.hpp +++ b/src/Passes/Source/Rules/OperandPrototype.hpp @@ -12,22 +12,22 @@ namespace microsoft namespace quantum { - /// OperandPrototype describes an IR pattern and allows matching against + /// IOperandPrototype describes an IR pattern and allows matching against /// LLVMs llvm::Value type. 
- class OperandPrototype + class IOperandPrototype { public: using Instruction = llvm::Instruction; using String = std::string; using Value = llvm::Value; - using Child = std::shared_ptr; + using Child = std::shared_ptr; using Children = std::vector; using Captures = std::unordered_map; /// Constructors and desctructors /// @{ - OperandPrototype() = default; - virtual ~OperandPrototype(); + IOperandPrototype() = default; + virtual ~IOperandPrototype(); /// @} /// Interface functions @@ -52,7 +52,7 @@ namespace quantum protected: /// Function to indicate match success or failure. Either of these /// must be called prior to return from an implementation of - /// OperandPrototype::match. + /// IOperandPrototype::match. /// @{ bool fail(Value* value, Captures& captures) const; bool success(Value* value, Captures& captures) const; @@ -69,7 +69,7 @@ namespace quantum /// @{ /// Shallow copy of the operand to allow name change /// of the capture - void copyPropertiesFrom(OperandPrototype const& other) + void copyPropertiesFrom(IOperandPrototype const& other) { capture_name_ = other.capture_name_; children_ = other.children_; diff --git a/src/Passes/Source/Rules/Operands/Any.hpp b/src/Passes/Source/Rules/Operands/Any.hpp index 51fe59a18e..3a9bd4ad69 100644 --- a/src/Passes/Source/Rules/Operands/Any.hpp +++ b/src/Passes/Source/Rules/Operands/Any.hpp @@ -14,7 +14,7 @@ namespace microsoft namespace quantum { - class AnyPattern : public OperandPrototype + class AnyPattern : public IOperandPrototype { public: AnyPattern(); diff --git a/src/Passes/Source/Rules/Operands/Call.hpp b/src/Passes/Source/Rules/Operands/Call.hpp index 6e8d81fd2b..11c6eafa08 100644 --- a/src/Passes/Source/Rules/Operands/Call.hpp +++ b/src/Passes/Source/Rules/Operands/Call.hpp @@ -14,11 +14,11 @@ namespace microsoft namespace quantum { - class CallPattern : public OperandPrototype + class CallPattern : public IOperandPrototype { public: using String = std::string; - CallPattern(String const& name); + 
explicit CallPattern(String const& name); ~CallPattern() override; diff --git a/src/Passes/Source/Rules/Operands/Instruction.hpp b/src/Passes/Source/Rules/Operands/Instruction.hpp index 6de1349cae..1cb8d8b96d 100644 --- a/src/Passes/Source/Rules/Operands/Instruction.hpp +++ b/src/Passes/Source/Rules/Operands/Instruction.hpp @@ -14,10 +14,10 @@ namespace microsoft namespace quantum { - template class InstructionPattern : public OperandPrototype + template class InstructionPattern : public IOperandPrototype { public: - using OperandPrototype::OperandPrototype; + using IOperandPrototype::IOperandPrototype; ~InstructionPattern() override; bool match(Value* instr, Captures& captures) const override; Child copy() const override; diff --git a/src/Passes/Source/Rules/ReplacementRule.cpp b/src/Passes/Source/Rules/ReplacementRule.cpp index 81c2f10000..486d429f95 100644 --- a/src/Passes/Source/Rules/ReplacementRule.cpp +++ b/src/Passes/Source/Rules/ReplacementRule.cpp @@ -8,13 +8,13 @@ namespace microsoft namespace quantum { - ReplacementRule::ReplacementRule(OperandPrototypePtr&& pattern, ReplaceFunction&& replacer) + ReplacementRule::ReplacementRule(IOperandPrototypePtr&& pattern, ReplaceFunction&& replacer) : pattern_{std::move(pattern)} , replacer_{std::move(replacer)} { } - void ReplacementRule::setPattern(OperandPrototypePtr&& pattern) + void ReplacementRule::setPattern(IOperandPrototypePtr&& pattern) { pattern_ = std::move(pattern); } diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index b0c610f2c5..4edd61997a 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -23,25 +23,25 @@ namespace quantum class ReplacementRule { public: - using Captures = OperandPrototype::Captures; - using Instruction = llvm::Instruction; - using Value = llvm::Value; - using OperandPrototypePtr = std::shared_ptr; - using Builder = llvm::IRBuilder<>; - using Replacements = 
std::vector>; - using ReplaceFunction = std::function; + using Captures = IOperandPrototype::Captures; + using Instruction = llvm::Instruction; + using Value = llvm::Value; + using IOperandPrototypePtr = std::shared_ptr; + using Builder = llvm::IRBuilder<>; + using Replacements = std::vector>; + using ReplaceFunction = std::function; /// Constructorss and destructors /// @{ ReplacementRule() = default; - ReplacementRule(OperandPrototypePtr&& pattern, ReplaceFunction&& replacer); + ReplacementRule(IOperandPrototypePtr&& pattern, ReplaceFunction&& replacer); /// @} /// Rule configuration /// @{ /// Sets the pattern describing logic to be replaced. - void setPattern(OperandPrototypePtr&& pattern); + void setPattern(IOperandPrototypePtr&& pattern); /// Sets the replacer logic which given a successful match will perform /// a replacement on the IR. @@ -60,8 +60,8 @@ namespace quantum bool replace(Builder& builder, Value* value, Captures& captures, Replacements& replacements) const; /// @} private: - OperandPrototypePtr pattern_{nullptr}; - ReplaceFunction replacer_{nullptr}; + IOperandPrototypePtr pattern_{nullptr}; + ReplaceFunction replacer_{nullptr}; }; } // namespace quantum diff --git a/src/Passes/Source/Rules/RuleSet.hpp b/src/Passes/Source/Rules/RuleSet.hpp index 8f335ea065..53490bfe6b 100644 --- a/src/Passes/Source/Rules/RuleSet.hpp +++ b/src/Passes/Source/Rules/RuleSet.hpp @@ -26,7 +26,7 @@ namespace quantum using ReplacementRulePtr = std::shared_ptr; using Rules = std::vector; using Replacements = ReplacementRule::Replacements; - using Captures = OperandPrototype::Captures; + using Captures = IOperandPrototype::Captures; using Instruction = llvm::Instruction; using Value = llvm::Value; using Builder = ReplacementRule::Builder; From 8a5825069b43984a791c8a1f6b0bfdd629fa4f61 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Sat, 14 Aug 2021 09:35:20 +0200 Subject: [PATCH 093/106] Deprecating LL tests --- .../tests/QubitAllocationAnalysis/{case1.ll => case1.deprecated} | 0 .../tests/QubitAllocationAnalysis/{case2.ll => case2.deprecrated} | 0 ...{static-qubit-arrays-1.ll => static-qubit-arrays-1.deprecated} | 0 ...{static-qubit-arrays-2.ll => static-qubit-arrays-2.deprecated} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename src/Passes/tests/QubitAllocationAnalysis/{case1.ll => case1.deprecated} (100%) rename src/Passes/tests/QubitAllocationAnalysis/{case2.ll => case2.deprecrated} (100%) rename src/Passes/tests/QubitAllocationAnalysis/inputs/{static-qubit-arrays-1.ll => static-qubit-arrays-1.deprecated} (100%) rename src/Passes/tests/QubitAllocationAnalysis/inputs/{static-qubit-arrays-2.ll => static-qubit-arrays-2.deprecated} (100%) diff --git a/src/Passes/tests/QubitAllocationAnalysis/case1.ll b/src/Passes/tests/QubitAllocationAnalysis/case1.deprecated similarity index 100% rename from src/Passes/tests/QubitAllocationAnalysis/case1.ll rename to src/Passes/tests/QubitAllocationAnalysis/case1.deprecated diff --git a/src/Passes/tests/QubitAllocationAnalysis/case2.ll b/src/Passes/tests/QubitAllocationAnalysis/case2.deprecrated similarity index 100% rename from src/Passes/tests/QubitAllocationAnalysis/case2.ll rename to src/Passes/tests/QubitAllocationAnalysis/case2.deprecrated diff --git a/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.deprecated similarity index 100% rename from src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll rename to src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.deprecated diff --git a/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.ll b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.deprecated similarity index 100% rename from 
src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.ll rename to src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.deprecated From b83c3d93c64e597a2b414c3c041f3de3281a7fa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Troels=20F=2E=20R=C3=B8nnow?= <574350+troelsfr@users.noreply.github.com> Date: Mon, 16 Aug 2021 08:12:36 +0200 Subject: [PATCH 094/106] Update src/Passes/README.md Co-authored-by: Stefan J. Wernli --- src/Passes/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Passes/README.md b/src/Passes/README.md index ac8cebb154..7acd987671 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -453,7 +453,7 @@ Target: https://stackoverflow.com/questions/22239801/how-to-load-llvm-bitcode-file-from-an-ifstream/22241953 -## Load passes LLVM passes +## Load LLVM passes https://llvm.org/docs/tutorial/MyFirstLanguageFrontend/LangImpl04.html From c3712a957e36f7343689f6d9c6e7061fbbfac701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Troels=20F=2E=20R=C3=B8nnow?= <574350+troelsfr@users.noreply.github.com> Date: Mon, 16 Aug 2021 08:17:52 +0200 Subject: [PATCH 095/106] Update src/Passes/Source/Rules/OperandPrototype.cpp --- src/Passes/Source/Rules/OperandPrototype.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Passes/Source/Rules/OperandPrototype.cpp b/src/Passes/Source/Rules/OperandPrototype.cpp index 1473c0e2aa..d48818d352 100644 --- a/src/Passes/Source/Rules/OperandPrototype.cpp +++ b/src/Passes/Source/Rules/OperandPrototype.cpp @@ -35,7 +35,6 @@ namespace quantum ++i; } - // llvm::errs() << "SUCCESS MATCH: " << *value << "\n"; return true; } From 1941b523284b808eb42d9f30b3368135cea062e2 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 16 Aug 2021 09:16:37 +0200 Subject: [PATCH 096/106] PR revisions --- src/Passes/Dockerfile | 34 ++++++++ src/Passes/README.md | 42 +--------- .../AllocationManager/AllocationManager.cpp | 2 + .../AllocationManager/AllocationManager.hpp | 3 + src/Passes/Source/Apps/CMakeLists.txt | 2 +- .../ExpandStaticAllocation.cpp | 2 +- .../ExpandStaticAllocation.hpp | 4 +- .../LibQirAllocationAnalysis.cpp} | 14 ++-- .../QirAllocationAnalysis.cpp} | 22 +++--- .../QirAllocationAnalysis.hpp | 77 +++++++++++++++++++ .../QubitAllocationAnalysis.hpp | 77 ------------------- .../ResourceRemapper/LibResourceRemapper.cpp | 38 --------- .../ResourceRemapper/ResourceRemapper.cpp | 30 -------- .../ResourceRemapper/ResourceRemapper.hpp | 37 --------- src/Passes/Source/Profiles/BaseProfile.cpp | 4 +- src/Passes/Source/Rules/Factory.cpp | 3 +- src/Passes/Source/Rules/Notation/Notation.cpp | 8 +- src/Passes/Source/Rules/OperandPrototype.cpp | 1 - .../docs/base-profile-transformations.md | 2 +- src/Passes/docs/continous-integration.md | 4 +- .../ConstSizeArray/Comparison.cpp | 18 ----- .../examples/QubitAllocationAnalysis/Makefile | 12 +-- .../QubitAllocationAnalysis/README.md | 34 ++++---- .../Makefile | 4 +- .../TeleportChain.csproj} | 0 .../TeleportChain.qs} | 0 .../analysis-example.ll | 39 ++++++---- .../QubitAllocationAnalysis/case1.deprecated | 2 +- .../QubitAllocationAnalysis/case2.deprecrated | 2 +- .../inputs/static-qubit-arrays-1.deprecated | 4 +- .../inputs/static-qubit-arrays-2.deprecated | 4 +- 31 files changed, 204 insertions(+), 321 deletions(-) create mode 100644 src/Passes/Dockerfile rename src/Passes/Source/Passes/{QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp => QirAllocationAnalysis/LibQirAllocationAnalysis.cpp} (67%) rename src/Passes/Source/Passes/{QubitAllocationAnalysis/QubitAllocationAnalysis.cpp => QirAllocationAnalysis/QirAllocationAnalysis.cpp} (69%) create mode 100644 
src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp delete mode 100644 src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp delete mode 100644 src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp delete mode 100644 src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp delete mode 100644 src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Comparison.cpp rename src/Passes/examples/QubitAllocationAnalysis/{ConstSizeArray => TeleportChain}/Makefile (63%) rename src/Passes/examples/QubitAllocationAnalysis/{ConstSizeArray/ConstSizeArray.csproj => TeleportChain/TeleportChain.csproj} (100%) rename src/Passes/examples/QubitAllocationAnalysis/{ConstSizeArray/ConstSizeArray.qs => TeleportChain/TeleportChain.qs} (100%) diff --git a/src/Passes/Dockerfile b/src/Passes/Dockerfile new file mode 100644 index 0000000000..8e5c9f2419 --- /dev/null +++ b/src/Passes/Dockerfile @@ -0,0 +1,34 @@ +FROM ubuntu:20.04 as bazel + +# basic dependencies - +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update -y && \ + apt-get install -y + +RUN apt-get install -y curl \ + pkg-config \ + findutils \ + wget + +# build dependencies +RUN apt install -y clang-11 cmake clang-format-11 clang-tidy-11 && \ + apt-get install -y llvm-11 lldb-11 llvm-11-dev libllvm11 llvm-11-runtime && \ + export CC=clang-11 && \ + export CXX=clang++ + +# Python +RUN apt install -y python3 && \ + update-alternatives --install /usr/bin/python python /usr/bin/python3 0 + +ADD . 
/src/ +RUN cd /src/ && \ + pip install -r requirements.txt && \ + chmod +x manage + +# running the build +RUN ./manage runci + +ENV CC=clang-11 \ + CXX=clang++-11 \ + PYTHONUNBUFFERED=1 \ + PYTHON_BIN_PATH=/usr/bin/python3 diff --git a/src/Passes/README.md b/src/Passes/README.md index 7acd987671..d3ab94b54a 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -7,13 +7,13 @@ Once the project is built (see next sections), you can generate a new QIR as follows: ```sh -./Source/Apps/qat --generate --profile baseProfile ../examples/QubitAllocationAnalysis/analysis-example.ll +./Source/Apps/qat --generate --profile baseProfile ../examples/QirAllocationAnalysis/analysis-example.ll ``` Likewise, you can validate that a QIR follows a specification by running: ```sh -./Source/Apps/qat --validate --profile baseProfile ../examples/QubitAllocationAnalysis/analysis-example.ll +./Source/Apps/qat --validate --profile baseProfile ../examples/QirAllocationAnalysis/analysis-example.ll ``` ## Example @@ -448,41 +448,3 @@ Target: ``` ./qat -profile=base-profile.yml -S file.ir > adapted.ir ``` - -## Loading IR - -https://stackoverflow.com/questions/22239801/how-to-load-llvm-bitcode-file-from-an-ifstream/22241953 - -## Load LLVM passes - -https://llvm.org/docs/tutorial/MyFirstLanguageFrontend/LangImpl04.html - -## Load custom passes - -## How to run analysis and transformation - -https://stackoverflow.com/questions/53501830/running-standard-optimization-passes-on-a-llvm-module - -## Profile specification - -```yaml -name: profile-name -displayName: Profile Name -pipeline: - - passName: loopUnroll - - passName: functionInline - - passName: staticQubitAllocation - - passName: staticMemory - - passName: ignoreCall - config: - functionName: -specification: - - passName: requireNoArithmetic - - passName: requireNoStaticAllocation - - passName: requireReducedFunctionsAvailability - config: - functions: - - -``` - -Decent YAML library: https://github.com/jbeder/yaml-cpp diff --git 
a/src/Passes/Source/AllocationManager/AllocationManager.cpp b/src/Passes/Source/AllocationManager/AllocationManager.cpp index 2c150eac4b..ca672ea8a1 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.cpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.cpp @@ -84,6 +84,8 @@ namespace quantum return mappings_[index].start; } + void AllocationManager::release() {} + void AllocationManager::release(String const& name) { auto it = name_to_index_.find(name); diff --git a/src/Passes/Source/AllocationManager/AllocationManager.hpp b/src/Passes/Source/AllocationManager/AllocationManager.hpp index 5d4dc3753c..90805cff4e 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.hpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.hpp @@ -58,6 +58,9 @@ namespace quantum /// Gets the offset of a name segment or address. Index getOffset(String const& name) const; + /// Releases unnamed address. + void release(); + /// Releases the named segment or address. 
void release(String const& name); diff --git a/src/Passes/Source/Apps/CMakeLists.txt b/src/Passes/Source/Apps/CMakeLists.txt index 471cae5f5b..8a3be9264a 100644 --- a/src/Passes/Source/Apps/CMakeLists.txt +++ b/src/Passes/Source/Apps/CMakeLists.txt @@ -1,4 +1,4 @@ add_executable(qat Qat/Qat.cpp Qat/LlvmAnalysis.cpp) target_link_libraries(qat ${llvm_libs}) -target_link_libraries(qat ExpandStaticAllocation QubitAllocationAnalysis TransformationRule Rules AllocationManager Commandline Profiles) \ No newline at end of file +target_link_libraries(qat ExpandStaticAllocation QirAllocationAnalysis TransformationRule Rules AllocationManager Commandline Profiles) \ No newline at end of file diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp index 45b97a3aee..5b6eee081a 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -35,7 +35,7 @@ namespace quantum std::vector remaining_arguments{}; auto callee_function = call_instr->getCalledFunction(); - auto& use_quantum = fam.getResult(*callee_function); + auto& use_quantum = fam.getResult(*callee_function); if (use_quantum.value) { diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp index dcad921310..3abd25050d 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp" #include "Llvm/Llvm.hpp" @@ -16,7 +16,7 @@ namespace quantum class ExpandStaticAllocationPass : public llvm::PassInfoMixin { public: - using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; + using QubitAllocationResult = QirAllocationAnalysisAnalytics::Result; using ConstantArguments = std::unordered_map; /// Constructors and destructors diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QirAllocationAnalysis/LibQirAllocationAnalysis.cpp similarity index 67% rename from src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp rename to src/Passes/Source/Passes/QirAllocationAnalysis/LibQirAllocationAnalysis.cpp index bb9f5529c9..f802656471 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QirAllocationAnalysis/LibQirAllocationAnalysis.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp" #include "Llvm/Llvm.hpp" @@ -11,18 +11,18 @@ namespace { // Interface to plugin -llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() +llvm::PassPluginLibraryInfo getQirAllocationAnalysisPluginInfo() { using namespace microsoft::quantum; using namespace llvm; - return {LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder& pb) { + return {LLVM_PLUGIN_API_VERSION, "QirAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder& pb) { // Registering a printer for the anaylsis pb.registerPipelineParsingCallback( [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { if (name == "print") { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); + fpm.addPass(QirAllocationAnalysisPrinter(llvm::errs())); return true; } return false; @@ -30,12 +30,12 @@ llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() pb.registerVectorizerStartEPCallback( [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); + fpm.addPass(QirAllocationAnalysisPrinter(llvm::errs())); }); // Registering the analysis module pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) { - fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); + fam.registerPass([] { return QirAllocationAnalysisAnalytics(); }); }); }}; } @@ -44,5 +44,5 @@ llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getQubitAllocationAnalysisPluginInfo(); + return getQirAllocationAnalysisPluginInfo(); } diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.cpp similarity index 69% rename from 
src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp rename to src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.cpp index 70fc168a7f..4648b2540d 100644 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp" #include "Llvm/Llvm.hpp" @@ -14,7 +14,7 @@ namespace microsoft namespace quantum { - QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( + QirAllocationAnalysisAnalytics::Result QirAllocationAnalysisAnalytics::run( llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/) { @@ -29,8 +29,8 @@ namespace quantum } auto target_function = call_instr->getCalledFunction(); auto name = target_function->getName(); - // llvm::errs() << "Testing " << name << " : " << *call_instr << "\n"; + // Checking for qubit allocation if (name == "__quantum__rt__qubit_allocate") { return {true}; @@ -41,31 +41,27 @@ namespace quantum return {true}; } + // Checking for result allocation if (name == "__quantum__qis__m__body") { return {true}; } - - if (name == "__quantum__qis__z__body") - { - return {true}; - } } } return {false}; } - QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream) + QirAllocationAnalysisPrinter::QirAllocationAnalysisPrinter(llvm::raw_ostream& out_stream) : out_stream_(out_stream) { } - llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run( + llvm::PreservedAnalyses QirAllocationAnalysisPrinter::run( llvm::Function& function, llvm::FunctionAnalysisManager& fam) { - auto& result = fam.getResult(function); + auto& result = fam.getResult(function); if (result.value) { @@ -78,12 +74,12 @@ namespace quantum return 
llvm::PreservedAnalyses::all(); } - bool QubitAllocationAnalysisPrinter::isRequired() + bool QirAllocationAnalysisPrinter::isRequired() { return true; } - llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; + llvm::AnalysisKey QirAllocationAnalysisAnalytics::Key; } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp b/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp new file mode 100644 index 0000000000..cafc311941 --- /dev/null +++ b/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp @@ -0,0 +1,77 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm/Llvm.hpp" + +#include +#include + +namespace microsoft +{ +namespace quantum +{ + + class QirAllocationAnalysisAnalytics : public llvm::AnalysisInfoMixin + { + public: + using String = std::string; + + struct Result + { + bool value{false}; + }; + + /// Constructors and destructors + /// @{ + QirAllocationAnalysisAnalytics() = default; + QirAllocationAnalysisAnalytics(QirAllocationAnalysisAnalytics const&) = delete; + QirAllocationAnalysisAnalytics(QirAllocationAnalysisAnalytics&&) = default; + ~QirAllocationAnalysisAnalytics() = default; + /// @} + + /// Operators + /// @{ + QirAllocationAnalysisAnalytics& operator=(QirAllocationAnalysisAnalytics const&) = delete; + QirAllocationAnalysisAnalytics& operator=(QirAllocationAnalysisAnalytics&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} + + private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; + }; + + class QirAllocationAnalysisPrinter : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + explicit QirAllocationAnalysisPrinter(llvm::raw_ostream& out_stream); + QirAllocationAnalysisPrinter() = delete; + 
QirAllocationAnalysisPrinter(QirAllocationAnalysisPrinter const&) = delete; + QirAllocationAnalysisPrinter(QirAllocationAnalysisPrinter&&) = default; + ~QirAllocationAnalysisPrinter() = default; + /// @} + + /// Operators + /// @{ + QirAllocationAnalysisPrinter& operator=(QirAllocationAnalysisPrinter const&) = delete; + QirAllocationAnalysisPrinter& operator=(QirAllocationAnalysisPrinter&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + private: + llvm::raw_ostream& out_stream_; + }; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp deleted file mode 100644 index 58203d5d11..0000000000 --- a/src/Passes/Source/Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp +++ /dev/null @@ -1,77 +0,0 @@ -#pragma once -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -#include "Llvm/Llvm.hpp" - -#include -#include - -namespace microsoft -{ -namespace quantum -{ - - class QubitAllocationAnalysisAnalytics : public llvm::AnalysisInfoMixin - { - public: - using String = std::string; - - struct Result - { - bool value{false}; - }; - - /// Constructors and destructors - /// @{ - QubitAllocationAnalysisAnalytics() = default; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const&) = delete; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics&&) = default; - ~QubitAllocationAnalysisAnalytics() = default; - /// @} - - /// Operators - /// @{ - QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics const&) = delete; - QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics&&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); - /// @} - - private: - static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; - }; - - class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin - { - public: - /// Constructors and destructors - /// @{ - explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream); - QubitAllocationAnalysisPrinter() = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const&) = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter&&) = default; - ~QubitAllocationAnalysisPrinter() = default; - /// @} - - /// Operators - /// @{ - QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter const&) = delete; - QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter&&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - private: - llvm::raw_ostream& out_stream_; - }; - -} // namespace quantum -} // namespace 
microsoft diff --git a/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp b/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp deleted file mode 100644 index 3e53131e7a..0000000000 --- a/src/Passes/Source/Passes/ResourceRemapper/LibResourceRemapper.cpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -#include "ResourceRemapper/ResourceRemapper.hpp" - -#include "Llvm/Llvm.hpp" - -#include -#include - -namespace -{ -llvm::PassPluginLibraryInfo getResourceRemapperPluginInfo() -{ - using namespace microsoft::quantum; - using namespace llvm; - - return {LLVM_PLUGIN_API_VERSION, "ResourceRemapper", LLVM_VERSION_STRING, [](PassBuilder& pb) { - // Registering the pass - pb.registerPipelineParsingCallback( - [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { - if (name == "resource-remapper") - { - fpm.addPass(ResourceRemapperPass()); - return true; - } - - return false; - }); - }}; -} -} // namespace - -// Interface for loading the plugin -extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() -{ - return getResourceRemapperPluginInfo(); -} diff --git a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp deleted file mode 100644 index 060e11f43b..0000000000 --- a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -#include "ResourceRemapper/ResourceRemapper.hpp" - -#include "Llvm/Llvm.hpp" - -#include -#include - -namespace microsoft -{ -namespace quantum -{ - llvm::PreservedAnalyses ResourceRemapperPass::run(llvm::Function& function, llvm::FunctionAnalysisManager& /*fam*/) - { - // Pass body - - llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; - - return llvm::PreservedAnalyses::all(); - } - - bool ResourceRemapperPass::isRequired() - { - return true; - } - -} // namespace quantum -} // namespace microsoft diff --git a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp b/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp deleted file mode 100644 index 03c6e564cc..0000000000 --- a/src/Passes/Source/Passes/ResourceRemapper/ResourceRemapper.hpp +++ /dev/null @@ -1,37 +0,0 @@ -#pragma once -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -#include "Llvm/Llvm.hpp" - -namespace microsoft -{ -namespace quantum -{ - - class ResourceRemapperPass : public llvm::PassInfoMixin - { - public: - /// Constructors and destructors - /// @{ - ResourceRemapperPass() = default; - ResourceRemapperPass(ResourceRemapperPass const&) = default; - ResourceRemapperPass(ResourceRemapperPass&&) = default; - ~ResourceRemapperPass() = default; - /// @} - - /// Operators - /// @{ - ResourceRemapperPass& operator=(ResourceRemapperPass const&) = default; - ResourceRemapperPass& operator=(ResourceRemapperPass&&) = default; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - }; - -} // namespace quantum -} // namespace microsoft diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index 8446224b3a..31889a6271 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -2,7 +2,7 @@ // Licensed under the 
MIT License. #include "Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp" -#include "Passes/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp" #include "Passes/TransformationRule/TransformationRule.hpp" #include "Profiles/BaseProfile.hpp" #include "Rules/Factory.hpp" @@ -91,7 +91,7 @@ namespace quantum void BaseProfile::addFunctionAnalyses(FunctionAnalysisManager& fam) { - fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); + fam.registerPass([] { return QirAllocationAnalysisAnalytics(); }); } } // namespace quantum diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp index 5528fb5ffa..e383eb2d04 100644 --- a/src/Passes/Source/Rules/Factory.cpp +++ b/src/Passes/Source/Rules/Factory.cpp @@ -32,8 +32,7 @@ namespace quantum void RuleFactory::removeFunctionCall(String const& name) { - ReplacementRule ret{callByNameOnly(name), deleteInstruction()}; - addRule(std::move(ret)); + addRule({callByNameOnly(name), deleteInstruction()}); } void RuleFactory::useStaticQubitArrayAllocation() diff --git a/src/Passes/Source/Rules/Notation/Notation.cpp b/src/Passes/Source/Rules/Notation/Notation.cpp index ebb6ad1ef3..aede0716a5 100644 --- a/src/Passes/Source/Rules/Notation/Notation.cpp +++ b/src/Passes/Source/Rules/Notation/Notation.cpp @@ -17,7 +17,13 @@ namespace quantum { namespace notation { - + /// Replacement function to delete an instruction. This is a shorthand notation for deleting + /// an instruction that can be used with a custom rule when building a ruleset. This function + /// can be used with shorthand notation for patterns as follows: + /// ```c++ + /// addRule({callByNameOnly(name), deleteInstruction()}); + /// ``` + /// to delete the instructions that calls functions with the name `name`. 
std::function - -void QuantumFunction(int32_t nQubits) -{ - volatile uint64_t x = 3; - for (uint64_t i = 0; i < x; ++i) - { - nQubits += nQubits; - } - int32_t qubits[nQubits]; -} - -int main() -{ - QuantumFunction(10); - QuantumFunction(3); - return 0; -} \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index 0318168070..97f49577bb 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -1,15 +1,15 @@ run-expand: build-qaa build-esa analysis-example.ll - opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ + opt -load-pass-plugin ../../Debug/Source/Passes/libQirAllocationAnalysis.dylib \ -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll run: build-qaa analysis-example.ll # TODO(tfr): Add comments - opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll + opt -load-pass-plugin ../../Debug/Source/Passes/libQirAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll run-replace: build-ir build-qaa build-esa analysis-example.ll # opt -loop-unroll -unroll-count=3 -unroll-allow-partial - opt -load-pass-plugin ../../Debug/Source/Passes/libQubitAllocationAnalysis.dylib \ + opt -load-pass-plugin ../../Debug/Source/Passes/libQirAllocationAnalysis.dylib \ -load-pass-plugin ../../Debug/Source/Passes/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll > analysis-example-step1.ll opt -load-pass-plugin ../../Debug/Source/Passes/libTransformationRule.dylib --passes="loop-simplify,loop-unroll,restrict-qir" -S analysis-example-step1.ll > analysis-example-final.ll opt --passes="inline" -S test2.ll | opt -O1 -S @@ -19,7 +19,7 @@ build-prepare: pushd ../../ && mkdir 
-p Debug && cd Debug && cmake ..&& popd || popd build-qaa: build-prepare - pushd ../../Debug && make QubitAllocationAnalysis && popd || popd + pushd ../../Debug && make QirAllocationAnalysis && popd || popd build-esa: build-prepare pushd ../../Debug && make ExpandStaticAllocation && popd || popd @@ -29,10 +29,10 @@ build-ir: build-prepare analysis-example.ll: - cd ConstSizeArray && make analysis-example.ll + cd TeleportChain && make analysis-example.ll clean: - cd ConstSizeArray && make clean + cd TeleportChain && make clean rm analysis-example.ll rm analysis-example-step1.ll rm analysis-example-final.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/README.md b/src/Passes/examples/QubitAllocationAnalysis/README.md index 515b641ba4..e704f2a85c 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/README.md +++ b/src/Passes/examples/QubitAllocationAnalysis/README.md @@ -1,4 +1,4 @@ -# QubitAllocationAnalysis +# QirAllocationAnalysis ## Quick start @@ -14,7 +14,7 @@ Running following command make run ``` -will first build the pass, then build the QIR using Q# following by removing the noise using `opt` with optimisation level 1. Finally, it will execute the analysis pass and should provide you with information about qubit allocation in the Q# program defined in `ConstSizeArray/ConstSizeArray.qs`. +will first build the pass, then build the QIR using Q# following by removing the noise using `opt` with optimisation level 1. Finally, it will execute the analysis pass and should provide you with information about qubit allocation in the Q# program defined in `TeleportChain/TeleportChain.qs`. ## Detailed run @@ -26,13 +26,13 @@ cd Debug cmake .. 
``` -and then compile the `QubitAllocationAnalysis`: +and then compile the `QirAllocationAnalysis`: ```sh -make QubitAllocationAnalysis +make QirAllocationAnalysis ``` -Next return `examples/QubitAllocationAnalysis` and enter the directory `ConstSizeArray` to build the QIR: +Next return `examples/QirAllocationAnalysis` and enter the directory `TeleportChain` to build the QIR: ```sh make analysis-example.ll @@ -41,20 +41,20 @@ make analysis-example.ll or execute the commands manually, ```sh -dotnet build ConstSizeArray.csproj -opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll +dotnet build TeleportChain.csproj +opt -S qir/TeleportChain.ll -O1 > ../analysis-example.ll make clean ``` -Returning to `examples/QubitAllocationAnalysis`, the pass can now be ran by executing: +Returning to `examples/QirAllocationAnalysis`, the pass can now be ran by executing: ```sh -opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQirAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll ``` ## Example cases -Below we will consider a few different examples. You can run them by updating the code in `ConstSizeArray/ConstSizeArray.qs` and executing `make run` from the `examples/QubitAllocationAnalysis` folder subsequently. You will need to delete `analysis-example.ll` between runs. +Below we will consider a few different examples. You can run them by updating the code in `TeleportChain/TeleportChain.qs` and executing `make run` from the `examples/QirAllocationAnalysis` folder subsequently. You will need to delete `analysis-example.ll` between runs. 
### Trivially constant @@ -72,8 +72,8 @@ namespace Example { The corresponding QIR is: ``` -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" +; ModuleID = 'qir/TeleportChain.ll' +source_filename = "qir/TeleportChain.ll" %Array = type opaque @@ -92,7 +92,7 @@ entry: Running the pass procudes following output: ``` -opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQirAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== @@ -124,8 +124,8 @@ namespace Example { The corresponding QIR is ``` -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" +; ModuleID = 'qir/TeleportChain.ll' +source_filename = "qir/TeleportChain.ll" %Array = type opaque %String = type opaque @@ -152,7 +152,7 @@ entry: The analyser returns following output: ``` -opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQirAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== @@ -196,7 +196,7 @@ namespace Example { We will omit the QIR in the documenation as it is a long. 
The output of the anaysis is: ``` -opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQirAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/QubitAllocationAnalysis/TeleportChain/Makefile similarity index 63% rename from src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile rename to src/Passes/examples/QubitAllocationAnalysis/TeleportChain/Makefile index 59399d367e..e5af26dbf1 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/TeleportChain/Makefile @@ -1,6 +1,6 @@ analysis-example.ll: - dotnet build ConstSizeArray.csproj - opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll + dotnet build TeleportChain.csproj + opt -S qir/TeleportChain.ll -O1 > ../analysis-example.ll make clean comparison: diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj b/src/Passes/examples/QubitAllocationAnalysis/TeleportChain/TeleportChain.csproj similarity index 100% rename from src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj rename to src/Passes/examples/QubitAllocationAnalysis/TeleportChain/TeleportChain.csproj diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/TeleportChain/TeleportChain.qs similarity index 100% rename from src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs rename to src/Passes/examples/QubitAllocationAnalysis/TeleportChain/TeleportChain.qs diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index 
81257dc78e..bd51c64d95 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -1,19 +1,17 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" +; ModuleID = 'qir/TeleportChain.ll' +source_filename = "qir/TeleportChain.ll" %Qubit = type opaque %Result = type opaque %Array = type opaque %String = type opaque -@0 = internal constant [3 x i8] c"()\00" - define internal fastcc void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) unnamed_addr { entry: %0 = call fastcc %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) - %1 = call %Result* @__quantum__rt__result_get_one() %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) br i1 %2, label %then0__1, label %continue__1 then0__1: ; preds = %entry @@ -60,7 +58,7 @@ entry: ret void } -define internal fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { +define internal fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() unnamed_addr { entry: %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() @@ -108,12 +106,11 @@ entry: call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) call void @__quantum__rt__result_update_reference_count(%Result* %27, i32 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) call void @__quantum__rt__qubit_release_array(%Array* 
%rightPreshared) - ret void + ret %Result* %31 } declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr @@ -181,29 +178,37 @@ declare void @__quantum__qis__x(%Qubit*) local_unnamed_addr declare void @__quantum__qis__z(%Qubit*) local_unnamed_addr -declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr - declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr -define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +define i8 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - ret void + %0 = call fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = call %Result* @__quantum__rt__result_get_zero() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + %not. = xor i1 %2, true + %3 = sext i1 %not. 
to i8 + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + ret i8 %3 } +declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr + define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() local_unnamed_addr #1 { entry: - call fastcc void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() - %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + %0 = call fastcc %Result* @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = call %String* @__quantum__rt__result_to_string(%Result* %0) + call void @__quantum__rt__message(%String* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) ret void } declare void @__quantum__rt__message(%String*) local_unnamed_addr +declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr + declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr attributes #0 = { "InteropFriendly" } diff --git a/src/Passes/tests/QubitAllocationAnalysis/case1.deprecated b/src/Passes/tests/QubitAllocationAnalysis/case1.deprecated index cab5557980..230a3dac66 100644 --- a/src/Passes/tests/QubitAllocationAnalysis/case1.deprecated +++ b/src/Passes/tests/QubitAllocationAnalysis/case1.deprecated @@ -1,4 +1,4 @@ -; RUN: opt -load-pass-plugin %shlibdir/libQubitAllocationAnalysis%shlibext -passes="print" %S/inputs/static-qubit-arrays-1.ll -disable-output 2>&1\ +; RUN: opt -load-pass-plugin %shlibdir/libQirAllocationAnalysis%shlibext -passes="print" %S/inputs/static-qubit-arrays-1.ll -disable-output 2>&1\ ; RUN: | FileCheck %s ;------------------------------------------------------------------------------ 
diff --git a/src/Passes/tests/QubitAllocationAnalysis/case2.deprecrated b/src/Passes/tests/QubitAllocationAnalysis/case2.deprecrated index 7f90c61a50..3a13a71027 100644 --- a/src/Passes/tests/QubitAllocationAnalysis/case2.deprecrated +++ b/src/Passes/tests/QubitAllocationAnalysis/case2.deprecrated @@ -1,4 +1,4 @@ -; RUN: opt -load-pass-plugin %shlibdir/libQubitAllocationAnalysis%shlibext -passes="print" %S/inputs/static-qubit-arrays-2.ll -disable-output 2>&1\ +; RUN: opt -load-pass-plugin %shlibdir/libQirAllocationAnalysis%shlibext -passes="print" %S/inputs/static-qubit-arrays-2.ll -disable-output 2>&1\ ; RUN: | FileCheck %s ;------------------------------------------------------------------------------ diff --git a/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.deprecated b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.deprecated index ea4ead0400..5c9c6ade0c 100644 --- a/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.deprecated +++ b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.deprecated @@ -1,5 +1,5 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" +; ModuleID = 'qir/TeleportChain.ll' +source_filename = "qir/TeleportChain.ll" %Array = type opaque %String = type opaque diff --git a/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.deprecated b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.deprecated index b87010d9d2..2ac826a845 100644 --- a/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.deprecated +++ b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.deprecated @@ -1,5 +1,5 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" +; ModuleID = 'qir/TeleportChain.ll' +source_filename = "qir/TeleportChain.ll" %Array = type opaque %String = type opaque From 0b71eb50bfdf081d9d51498191d89e71c41f06e1 Mon Sep 17 00:00:00 2001 
From: "Troels F. Roennow" Date: Mon, 16 Aug 2021 09:19:15 +0200 Subject: [PATCH 097/106] Updating README --- src/Passes/Dockerfile | 2 +- src/Passes/README.md | 8 -------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/src/Passes/Dockerfile b/src/Passes/Dockerfile index 8e5c9f2419..28c2ceae0f 100644 --- a/src/Passes/Dockerfile +++ b/src/Passes/Dockerfile @@ -17,7 +17,7 @@ RUN apt install -y clang-11 cmake clang-format-11 clang-tidy-11 && \ export CXX=clang++ # Python -RUN apt install -y python3 && \ +RUN apt install -y python3 python3-pip && \ update-alternatives --install /usr/bin/python python /usr/bin/python3 0 ADD . /src/ diff --git a/src/Passes/README.md b/src/Passes/README.md index d3ab94b54a..8a3a3e3705 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -440,11 +440,3 @@ If you forget to instantiate this variable in your corresponding `.cpp` file, ``` everything will compile, but the pass will fail to load. There will be no linking errors either. - -# Notes on QIR Profile Tool (QIR Adaptor Tool) - -Target: - -``` -./qat -profile=base-profile.yml -S file.ir > adapted.ir -``` From d324ae4fce5f528aefd4d9097442a325e9ddf15f Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 16 Aug 2021 10:20:07 +0200 Subject: [PATCH 098/106] Updating with review items --- src/Passes/CMakeLists.txt | 8 +++++--- src/Passes/Dockerfile | 8 ++++++-- src/Passes/README.md | 4 +--- src/Passes/Source/Rules/Factory.cpp | 12 ++++++++++-- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index a2a1074141..2ffe7403c0 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -29,8 +29,10 @@ endif() # are triggered if llvm-tutor is built without this flag (though otherwise it # builds fine). For consistency, add it here too. 
check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) -if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") +if(${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG}) + if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} STREQUAL "1") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") + endif() endif() # We export the compile commands which are needed by clang-tidy @@ -43,7 +45,7 @@ include_directories(${LLVM_INCLUDE_DIRS}) link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/Source) -llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit x86asmparser x86codegen x86desc x86disassembler x86info interpreter) +llvm_map_components_to_libnames(llvm_libs support core irreader passes orcjit x86asmparser x86codegen x86desc x86disassembler x86info interpreter objcarcopts) # Adding the libraries diff --git a/src/Passes/Dockerfile b/src/Passes/Dockerfile index 28c2ceae0f..85b4e86faf 100644 --- a/src/Passes/Dockerfile +++ b/src/Passes/Dockerfile @@ -26,9 +26,13 @@ RUN cd /src/ && \ chmod +x manage # running the build -RUN ./manage runci - ENV CC=clang-11 \ CXX=clang++-11 \ PYTHONUNBUFFERED=1 \ PYTHON_BIN_PATH=/usr/bin/python3 + +WORKDIR /src/ +RUN export CC=clang-11 && \ + export CXX=clang++-11 && \ + ./manage runci + diff --git a/src/Passes/README.md b/src/Passes/README.md index 8a3a3e3705..0e99984f96 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -149,12 +149,10 @@ TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit: ; preds = % ``` -We note the absence of loops, and that quantum registers are "allocated" at compile time meaning that each qubit instance is assigned a unique ID. As some code may be dead and optimised away, the qubit allocation is not garantueed to be sequential at this point in time. 
Future work will include writing a qubit ID remapper which will allow qubits. +We note the absence of loops, and that quantum registers are "allocated" at compile time meaning that each qubit instance is assigned a unique ID. As some code may be dead and optimised away, the qubit allocation is not garantueed to be sequential at this point in time. Future work will include writing a qubit ID remapper which will allow qubit IDs to become strictly increasing with no gaps inbetween. We also note that the function `TeleportChain__TeleportQubitUsingPresharedEntanglement__body` was cloned twice. This is due to the allocation of qubits and the function being called twice. At present, the analyser does not take qubit release into account and just assumes that it will never be released due to the complicated nature for dealing with nested functions at compile time. -Current TODOs include getting LLVM to remove dead code, do better constant folding and function inlining. Once this is performed correctly, next steps is the remapper and finally a better analysis on what call paths potentially create problems in terms of qubit allocation. 
- ## Dependencies This library is written in C++ and depends on: diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp index e383eb2d04..ef7720eb10 100644 --- a/src/Passes/Source/Rules/Factory.cpp +++ b/src/Passes/Source/Rules/Factory.cpp @@ -159,8 +159,16 @@ namespace quantum }; addRule({call("__quantum__rt__qubit_allocate"), allocation_replacer}); - // Removing release calls - removeFunctionCall("__quantum__rt__qubit_release"); + /// Release replacement + auto deleter = deleteInstruction(); + addRule( + {call("__quantum__rt__qubit_release", "name"_cap = _), + [qubit_alloc_manager, deleter](Builder& builder, Value* val, Captures& cap, Replacements& rep) { + qubit_alloc_manager->release(); + return deleter(builder, val, cap, rep); + } + + }); } void RuleFactory::useStaticResultAllocation() From 0f4b921cdebadb8784e99c4a32bc458569c7547f Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 16 Aug 2021 10:26:13 +0200 Subject: [PATCH 099/106] Updating linux build --- build/passes-linux.yml | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/build/passes-linux.yml b/build/passes-linux.yml index 58f666b149..e5746c5437 100644 --- a/build/passes-linux.yml +++ b/build/passes-linux.yml @@ -1,14 +1,19 @@ steps: - script: | - sudo apt update -y - sudo apt install -y clang-11 cmake clang-format-11 clang-tidy-11 - sudo apt-get install -y llvm-11 lldb-11 llvm-11-dev libllvm11 llvm-11-runtime - export CC=clang-11 - export CXX=clang++ - cd src/Passes/ - - pip install -r requirements.txt - chmod +x manage - ./manage runci - displayName: Linux build and CI for passes + export DEBIAN_FRONTEND=noninteractive + sudo apt-get update -y + sudo apt-get install -y + sudo apt-get install -y curl pkg-config findutils wget + sudo apt install -y clang-11 cmake clang-format-11 clang-tidy-11 + sudo apt-get install -y llvm-11 lldb-11 llvm-11-dev libllvm11 llvm-11-runtime + sudo apt install -y python3 python3-pip + 
sudo update-alternatives --install /usr/bin/python python /usr/bin/python3 0 + cd src/Passes/ + pip install -r requirements.txt + chmod +x manage + export PYTHONUNBUFFERED=1 + export PYTHON_BIN_PATH=/usr/bin/python3 + export CC=clang-11 + export CXX=clang++-11 + ./manage runci From 288abdc618ebb620965cefff958ff8c733252a73 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 16 Aug 2021 10:49:13 +0200 Subject: [PATCH 100/106] Fixing yaml file --- build/passes-linux.yml | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/build/passes-linux.yml b/build/passes-linux.yml index e5746c5437..aace2c868e 100644 --- a/build/passes-linux.yml +++ b/build/passes-linux.yml @@ -1,19 +1,20 @@ steps: - script: | - export DEBIAN_FRONTEND=noninteractive - sudo apt-get update -y - sudo apt-get install -y - sudo apt-get install -y curl pkg-config findutils wget - sudo apt install -y clang-11 cmake clang-format-11 clang-tidy-11 - sudo apt-get install -y llvm-11 lldb-11 llvm-11-dev libllvm11 llvm-11-runtime - sudo apt install -y python3 python3-pip - sudo update-alternatives --install /usr/bin/python python /usr/bin/python3 0 - cd src/Passes/ - pip install -r requirements.txt - chmod +x manage - export PYTHONUNBUFFERED=1 - export PYTHON_BIN_PATH=/usr/bin/python3 - export CC=clang-11 - export CXX=clang++-11 - ./manage runci + export DEBIAN_FRONTEND=noninteractive + sudo apt-get update -y + sudo apt-get install -y + sudo apt-get install -y curl pkg-config findutils wget + sudo apt install -y clang-11 cmake clang-format-11 clang-tidy-11 + sudo apt-get install -y llvm-11 lldb-11 llvm-11-dev libllvm11 llvm-11-runtime + sudo apt install -y python3 python3-pip + sudo update-alternatives --install /usr/bin/python python /usr/bin/python3 0 + cd src/Passes/ + pip install -r requirements.txt + chmod +x manage + export PYTHONUNBUFFERED=1 + export PYTHON_BIN_PATH=/usr/bin/python3 + export CC=clang-11 + export CXX=clang++-11 + ./manage runci + 
displayName: Linux build and CI for passes From 51fb281c51e7f8955338a7253d2776702f43bc7c Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 16 Aug 2021 10:55:57 +0200 Subject: [PATCH 101/106] Removing LLVM 12 --- build/passes-linux.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/build/passes-linux.yml b/build/passes-linux.yml index aace2c868e..29dc164c96 100644 --- a/build/passes-linux.yml +++ b/build/passes-linux.yml @@ -4,6 +4,7 @@ steps: export DEBIAN_FRONTEND=noninteractive sudo apt-get update -y sudo apt-get install -y + sudo apt-get remove -y llvm-12 sudo apt-get install -y curl pkg-config findutils wget sudo apt install -y clang-11 cmake clang-format-11 clang-tidy-11 sudo apt-get install -y llvm-11 lldb-11 llvm-11-dev libllvm11 llvm-11-runtime From fad944f128c48174096f36cd96a2d2a1d70ff381 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Troels=20F=2E=20R=C3=B8nnow?= <574350+troelsfr@users.noreply.github.com> Date: Mon, 16 Aug 2021 11:05:20 +0200 Subject: [PATCH 102/106] Update src/Passes/Source/Rules/ReplacementRule.hpp Co-authored-by: bettinaheim <34236215+bettinaheim@users.noreply.github.com> --- src/Passes/Source/Rules/ReplacementRule.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Passes/Source/Rules/ReplacementRule.hpp b/src/Passes/Source/Rules/ReplacementRule.hpp index 4edd61997a..a277087fd9 100644 --- a/src/Passes/Source/Rules/ReplacementRule.hpp +++ b/src/Passes/Source/Rules/ReplacementRule.hpp @@ -17,7 +17,7 @@ namespace quantum { /// Rule that describes a pattern and how to make a replacement of the matched values. - /// The class contians a OprandPrototype which is used to test whether an LLVM IR value + /// The class contains a OperandPrototype which is used to test whether an LLVM IR value /// follows a specific pattern. The class also holds a function pointer to logic that /// allows replacement of the specified value. 
class ReplacementRule From c2cbe9af065b88752f98c3a58acddf33f8d29e61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Troels=20F=2E=20R=C3=B8nnow?= <574350+troelsfr@users.noreply.github.com> Date: Mon, 16 Aug 2021 11:26:35 +0200 Subject: [PATCH 103/106] Update src/Passes/Source/AllocationManager/AllocationManager.hpp Co-authored-by: bettinaheim <34236215+bettinaheim@users.noreply.github.com> --- src/Passes/Source/AllocationManager/AllocationManager.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Passes/Source/AllocationManager/AllocationManager.hpp b/src/Passes/Source/AllocationManager/AllocationManager.hpp index 90805cff4e..877f98b2b1 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.hpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.hpp @@ -39,7 +39,7 @@ namespace quantum using NameToIndex = std::unordered_map; using Mappings = std::vector; - /// Pointer contstruction + /// Pointer construction /// @{ /// Creates a new allocation manager. The manager is kept /// as a shared pointer to enable allocation accross diffent From b2faa6c2c14101f8ec04a02caa98c2c55fd035f3 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 16 Aug 2021 14:31:45 +0200 Subject: [PATCH 104/106] Adding variaous suggestions --- .../AllocationManager/AllocationManager.hpp | 10 +++++--- src/Passes/Source/Llvm/Llvm.hpp | 1 - .../ExpandStaticAllocation.cpp | 10 ++++++-- .../ExpandStaticAllocation.hpp | 4 ++- .../LibQirAllocationAnalysis.cpp | 5 ++-- .../QirAllocationAnalysis.cpp | 6 ++--- .../QirAllocationAnalysis.hpp | 22 ++++++++++------ .../LibTransformationRule.cpp | 2 ++ .../TransformationRule/TransformationRule.cpp | 7 ------ .../TransformationRule/TransformationRule.hpp | 3 +++ src/Passes/Source/Profiles/BaseProfile.cpp | 25 +++---------------- src/Passes/Source/Rules/Factory.cpp | 19 +++++--------- src/Passes/Source/Rules/OperandPrototype.cpp | 4 +-- src/Passes/Source/Rules/OperandPrototype.hpp | 5 +++- .../Source/Rules/Operands/Instruction.cpp | 2 +- src/Passes/Source/Rules/RuleSet.hpp | 1 - 16 files changed, 59 insertions(+), 67 deletions(-) diff --git a/src/Passes/Source/AllocationManager/AllocationManager.hpp b/src/Passes/Source/AllocationManager/AllocationManager.hpp index 877f98b2b1..8b41d78c57 100644 --- a/src/Passes/Source/AllocationManager/AllocationManager.hpp +++ b/src/Passes/Source/AllocationManager/AllocationManager.hpp @@ -13,6 +13,9 @@ namespace microsoft { namespace quantum { + // TODO(QAT-private-issue-35): work out similarities and differences between resource allocation at + // runtime and compile time to make common interface and/or implementation depending on what is most + // suited. class AllocationManager { @@ -80,15 +83,16 @@ namespace quantum /// Memory mapping /// @{ /// Each allocation has a register/memory mapping which - /// keeps track of the + /// keeps track of the allocation index, the segment size + /// and its name (if any). 
NameToIndex name_to_index_; Mappings mappings_; /// @} /// Compile-time resources /// @{ - /// Compile-time allocated resources such as - /// arrays who + /// Compile-time allocated resources that keeps a pointer + /// to the corresponding Values. Resources resources_; /// @} diff --git a/src/Passes/Source/Llvm/Llvm.hpp b/src/Passes/Source/Llvm/Llvm.hpp index e0b19b497f..4979a02f1d 100644 --- a/src/Passes/Source/Llvm/Llvm.hpp +++ b/src/Passes/Source/Llvm/Llvm.hpp @@ -52,7 +52,6 @@ #include "llvm/Transforms/Scalar/LoopUnrollPass.h" // Profiles -#include "llvm/IR/LegacyPassManager.h" #include "llvm/LinkAllPasses.h" #include "llvm/Transforms/Scalar/ADCE.h" #include "llvm/Transforms/Scalar/DCE.h" diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp index 5b6eee081a..5f045ce4ed 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -12,10 +12,15 @@ namespace microsoft { namespace quantum { + /// This pass traverse the IR and uses the QirAllocationAnalysis to determine + /// if a function call results in qubit and/or result allocation. If that is the case, + /// it makes a copy of the function and replaces the function call with a call to the + /// new function. llvm::PreservedAnalyses ExpandStaticAllocationPass::run( llvm::Function& function, llvm::FunctionAnalysisManager& fam) { + // Pass body for (auto& basic_block : function) { @@ -35,7 +40,7 @@ namespace quantum std::vector remaining_arguments{}; auto callee_function = call_instr->getCalledFunction(); - auto& use_quantum = fam.getResult(*callee_function); + auto& use_quantum = fam.getResult(*callee_function); if (use_quantum.value) { @@ -163,7 +168,8 @@ namespace quantum llvm::SmallVector returns; // Ignore returns cloned. 
- // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' + // TODO(QAT-private-issue-28): In LLVM 13 upgrade 'true' to + // 'llvm::CloneFunctionChangeType::LocalChangesOnly' llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); verifyFunction(*function); diff --git a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp index 3abd25050d..7f3a7bb8a8 100644 --- a/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp +++ b/src/Passes/Source/Passes/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -13,10 +13,12 @@ namespace microsoft namespace quantum { + /// This class copies functions which does static qubit and/or result allocation. This is done + /// to ensure that qubits/result registers are not reused but instead assigned unique ids. class ExpandStaticAllocationPass : public llvm::PassInfoMixin { public: - using QubitAllocationResult = QirAllocationAnalysisAnalytics::Result; + using QubitAllocationResult = QirAllocationAnalysis::Result; using ConstantArguments = std::unordered_map; /// Constructors and destructors diff --git a/src/Passes/Source/Passes/QirAllocationAnalysis/LibQirAllocationAnalysis.cpp b/src/Passes/Source/Passes/QirAllocationAnalysis/LibQirAllocationAnalysis.cpp index f802656471..47ad891e73 100644 --- a/src/Passes/Source/Passes/QirAllocationAnalysis/LibQirAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QirAllocationAnalysis/LibQirAllocationAnalysis.cpp @@ -34,9 +34,8 @@ llvm::PassPluginLibraryInfo getQirAllocationAnalysisPluginInfo() }); // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) { - fam.registerPass([] { return QirAllocationAnalysisAnalytics(); }); - }); + pb.registerAnalysisRegistrationCallback( + [](FunctionAnalysisManager& fam) { fam.registerPass([] { return QirAllocationAnalysis(); }); 
}); }}; } diff --git a/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.cpp b/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.cpp index 4648b2540d..fea4377fee 100644 --- a/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.cpp +++ b/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.cpp @@ -14,7 +14,7 @@ namespace microsoft namespace quantum { - QirAllocationAnalysisAnalytics::Result QirAllocationAnalysisAnalytics::run( + QirAllocationAnalysis::Result QirAllocationAnalysis::run( llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/) { @@ -61,7 +61,7 @@ namespace quantum llvm::Function& function, llvm::FunctionAnalysisManager& fam) { - auto& result = fam.getResult(function); + auto& result = fam.getResult(function); if (result.value) { @@ -79,7 +79,7 @@ namespace quantum return true; } - llvm::AnalysisKey QirAllocationAnalysisAnalytics::Key; + llvm::AnalysisKey QirAllocationAnalysis::Key; } // namespace quantum } // namespace microsoft diff --git a/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp b/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp index cafc311941..3d6c6f3c21 100644 --- a/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp +++ b/src/Passes/Source/Passes/QirAllocationAnalysis/QirAllocationAnalysis.hpp @@ -12,11 +12,15 @@ namespace microsoft namespace quantum { - class QirAllocationAnalysisAnalytics : public llvm::AnalysisInfoMixin + /// QirAllocationAnalysis is a LLVM pass that statistics on the usage of operations which allocates + /// resources such as qubits and results. + class QirAllocationAnalysis : public llvm::AnalysisInfoMixin { public: using String = std::string; + /// Result annotation. Contains a single value which + /// is true if the function uses allocations of qubits or results. 
struct Result { bool value{false}; @@ -24,16 +28,16 @@ namespace quantum /// Constructors and destructors /// @{ - QirAllocationAnalysisAnalytics() = default; - QirAllocationAnalysisAnalytics(QirAllocationAnalysisAnalytics const&) = delete; - QirAllocationAnalysisAnalytics(QirAllocationAnalysisAnalytics&&) = default; - ~QirAllocationAnalysisAnalytics() = default; + QirAllocationAnalysis() = default; + QirAllocationAnalysis(QirAllocationAnalysis const&) = delete; + QirAllocationAnalysis(QirAllocationAnalysis&&) = default; + ~QirAllocationAnalysis() = default; /// @} /// Operators /// @{ - QirAllocationAnalysisAnalytics& operator=(QirAllocationAnalysisAnalytics const&) = delete; - QirAllocationAnalysisAnalytics& operator=(QirAllocationAnalysisAnalytics&&) = delete; + QirAllocationAnalysis& operator=(QirAllocationAnalysis const&) = delete; + QirAllocationAnalysis& operator=(QirAllocationAnalysis&&) = delete; /// @} /// Functions required by LLVM @@ -43,9 +47,11 @@ namespace quantum private: static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; + friend struct llvm::AnalysisInfoMixin; }; + /// QirAllocationAnalysisPrinter is a LLVM pass class that prints statistics generated by + /// QirAllocationAnalysis. class QirAllocationAnalysisPrinter : public llvm::PassInfoMixin { public: diff --git a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp index 55fc5426ad..41a9e19eef 100644 --- a/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/LibTransformationRule.cpp @@ -23,6 +23,8 @@ llvm::PassPluginLibraryInfo getTransformationRulePluginInfo() // Base profile if (name == "restrict-qir") { + // Defining a harded coded set of rules as LLVM does not provide means + // to configure passes through opt. 
RuleSet rule_set; // Defining the mapping diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp index 8fa45abb59..297fbd8d4e 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.cpp @@ -69,13 +69,6 @@ namespace quantum } } - /* - for (auto &basic_block : function) - { - llvm::errs() << "REPLACEMENTS DONE FOR:\n"; - llvm::errs() << basic_block << "\n\n"; - } - */ // If we did not change the IR, we report that we preserved all if (replacements_.empty()) { diff --git a/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp index 831189a9dd..a74a845bcf 100644 --- a/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp +++ b/src/Passes/Source/Passes/TransformationRule/TransformationRule.hpp @@ -13,6 +13,9 @@ namespace microsoft namespace quantum { + /// This class applies a set of transformation rules to the IR to transform it into a new IR. The + /// rules are added using the RuleSet class which allows the developer to create one or more rules + /// on how to transform the IR. 
class TransformationRulePass : public llvm::PassInfoMixin { public: diff --git a/src/Passes/Source/Profiles/BaseProfile.cpp b/src/Passes/Source/Profiles/BaseProfile.cpp index 31889a6271..e6db328b21 100644 --- a/src/Passes/Source/Profiles/BaseProfile.cpp +++ b/src/Passes/Source/Profiles/BaseProfile.cpp @@ -27,7 +27,8 @@ namespace quantum auto inliner_pass = pass_builder.buildInlinerPipeline(optimisation_level, llvm::PassBuilder::ThinLTOPhase::None, debug); - // TODO(tfr): Maybe this should be done at a module level + // TODO(QAT-private-issue-29): Determine if static expansion should happen as a module pass + // instead of a function pass function_pass_manager.addPass(ExpandStaticAllocationPass()); RuleSet rule_set; @@ -52,31 +53,13 @@ namespace quantum function_pass_manager.addPass(llvm::DCEPass()); function_pass_manager.addPass(llvm::ADCEPass()); - // function_pass_manager.addPass(llvm::createCalledValuePropagationPass()); - // function_pass_manager.addPass(createSIFoldOperandsPass()); - - // Legacy passes: - // https://llvm.org/doxygen/group__LLVMCTransformsIPO.html#ga2ebfe3e0c3cca3b457708b4784ba93ff - - // https://llvm.org/docs/NewPassManager.html - // modulePassManager.addPass(createModuleToCGSCCPassAdaptor(...)); - // InlinerPass() - - // auto &cgpm = inliner_pass.getPM(); - // cgpm.addPass(llvm::ADCEPass()); - - // CGPM.addPass(createCGSCCToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(LoopFooPass()))); - // CGPM.addPass(createCGSCCToFunctionPassAdaptor(FunctionFooPass())); - ret.addPass(createModuleToFunctionPassAdaptor(std::move(function_pass_manager))); - // TODO(tfr): Not available in 11 + // TODO(QAT-private-issue-30): Mordernise: Upon upgrading to LLVM 12 or 13, change CGPM to // ret.addPass(llvm::createModuleToCGSCCPassAdaptor(std::move(CGPM))); ret.addPass(llvm::AlwaysInlinerPass()); ret.addPass(std::move(inliner_pass)); - // ret.addPass(); - // CGSCCA pass llvm::InlinerPass() return ret; } @@ -91,7 +74,7 @@ namespace quantum void 
BaseProfile::addFunctionAnalyses(FunctionAnalysisManager& fam) { - fam.registerPass([] { return QirAllocationAnalysisAnalytics(); }); + fam.registerPass([] { return QirAllocationAnalysis(); }); } } // namespace quantum diff --git a/src/Passes/Source/Rules/Factory.cpp b/src/Passes/Source/Rules/Factory.cpp index ef7720eb10..32d6483c6e 100644 --- a/src/Passes/Source/Rules/Factory.cpp +++ b/src/Passes/Source/Rules/Factory.cpp @@ -37,7 +37,7 @@ namespace quantum void RuleFactory::useStaticQubitArrayAllocation() { - // TODO(tfr): Consider using weak pointers + // TODO(QAT-private-issue-32): Use weak pointers to capture allocation managers auto qubit_alloc_manager = qubit_alloc_manager_; /// Allocation @@ -89,9 +89,7 @@ namespace quantum // Computing offset auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); + // Converting pointer auto instr = new llvm::IntToPtrInst(new_index, ptr_type); instr->takeName(val); @@ -140,15 +138,12 @@ namespace quantum auto offset = qubit_alloc_manager->allocate(); // Creating a new index APInt that is shifted by the offset of the allocation - // TODO(tfr): Get the bitwidth size from somewhere + // TODO(QAT-private-issue-32): Make default integer width a module parameter or extract from QIR auto idx = llvm::APInt(64, offset); // Computing offset auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. 
Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); auto instr = new llvm::IntToPtrInst(new_index, ptr_type); instr->takeName(val); @@ -188,15 +183,12 @@ namespace quantum auto offset = result_alloc_manager->allocate(); // Creating a new index APInt that is shifted by the offset of the allocation - // TODO(tfr): Get the bitwidth size from somewhere + // TODO(QAT-private-issue-32): Make default integer width a module parameter or extract from QIR auto idx = llvm::APInt(64, offset); // Computing offset auto new_index = llvm::ConstantInt::get(builder.getContext(), idx); - // TODO(tfr): Understand what the significance of the addressspace is in relation to the - // QIR. Activate by uncommenting: - // ptr_type = llvm::PointerType::get(ptr_type->getElementType(), 2); auto instr = new llvm::IntToPtrInst(new_index, ptr_type); instr->takeName(val); @@ -229,7 +221,6 @@ namespace quantum builder.CreateCall(function, arguments); // Replacing the instruction with new instruction - // TODO(tfr): (tfr): insert instruction before and then replace, with new call replacements.push_back({llvm::dyn_cast(val), instr}); return true; @@ -284,6 +275,8 @@ namespace quantum }; /* + Here is an example IR for which we want to make a match: + %1 = call %Result* @__quantum__rt__result_get_one() %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) br i1 %2, label %then0__1, label %continue__1 diff --git a/src/Passes/Source/Rules/OperandPrototype.cpp b/src/Passes/Source/Rules/OperandPrototype.cpp index e33547474d..3a425a3404 100644 --- a/src/Passes/Source/Rules/OperandPrototype.cpp +++ b/src/Passes/Source/Rules/OperandPrototype.cpp @@ -38,8 +38,8 @@ namespace quantum return true; } - // llvm::errs() << "SUCCESS MATCH: " << *value << " " << user->getNumOperands() << "\n"; - // TODO(tfr): Check other possibilities for value + // TODO(QAT-private-issue-33): value may be other type than llvm::User. 
Check other relevant types + // and deal with it. return true; } diff --git a/src/Passes/Source/Rules/OperandPrototype.hpp b/src/Passes/Source/Rules/OperandPrototype.hpp index 5f910176fc..1b0269f76d 100644 --- a/src/Passes/Source/Rules/OperandPrototype.hpp +++ b/src/Passes/Source/Rules/OperandPrototype.hpp @@ -13,7 +13,10 @@ namespace quantum { /// IOperandPrototype describes an IR pattern and allows matching against - /// LLVMs llvm::Value type. + /// LLVMs llvm::Value type. Each value may or may not be captured during the + /// matching process which means that they are stored in a map under a given name. + /// Capturing is enabled using `enableCapture(name)` which sets the name the + /// value should be stored under. class IOperandPrototype { public: diff --git a/src/Passes/Source/Rules/Operands/Instruction.cpp b/src/Passes/Source/Rules/Operands/Instruction.cpp index 9ee7a05331..04305c99fb 100644 --- a/src/Passes/Source/Rules/Operands/Instruction.cpp +++ b/src/Passes/Source/Rules/Operands/Instruction.cpp @@ -28,7 +28,7 @@ namespace quantum return std::move(ret); } -// TODO(tfr): This seems to be a bug in LLVM. Template instantiations in +// TODO(QAT-private-issue-34): This seems to be a bug in LLVM. Template instantiations in // a single translation unit is not supposed to reinstantiate across other // translation units. // diff --git a/src/Passes/Source/Rules/RuleSet.hpp b/src/Passes/Source/Rules/RuleSet.hpp index 53490bfe6b..44571fe4b0 100644 --- a/src/Passes/Source/Rules/RuleSet.hpp +++ b/src/Passes/Source/Rules/RuleSet.hpp @@ -44,7 +44,6 @@ namespace quantum /// @{ RuleSet& operator=(RuleSet const&) = default; RuleSet& operator=(RuleSet&&) = default; - // TODO(tfr): add RuleSet operator&(RuleSet const &other); /// @} /// Operating rule sets From 511baaa52d976e7990ca53ed4bd3b61237c44006 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 16 Aug 2021 15:46:32 +0200 Subject: [PATCH 105/106] Updating documentation --- src/Passes/README.md | 213 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 183 insertions(+), 30 deletions(-) diff --git a/src/Passes/README.md b/src/Passes/README.md index 0e99984f96..a3bbd12c13 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -4,16 +4,16 @@ ## Quick start -Once the project is built (see next sections), you can generate a new QIR as follows: +Once the project is built (see next sections), you can transform a QIR according to a profile as follows: ```sh -./Source/Apps/qat --generate --profile baseProfile ../examples/QirAllocationAnalysis/analysis-example.ll +./Source/Apps/qat --generate --profile baseProfile -S ../examples/QirAllocationAnalysis/analysis-example.ll ``` -Likewise, you can validate that a QIR follows a specification by running: +Likewise, you can validate that a QIR follows a specification by running (Note, not implemented yet): ```sh -./Source/Apps/qat --validate --profile baseProfile ../examples/QirAllocationAnalysis/analysis-example.ll +./Source/Apps/qat --validate --profile baseProfile -S ../examples/QirAllocationAnalysis/analysis-example.ll ``` ## Example @@ -76,10 +76,10 @@ namespace TeleportChain { } ``` -Once compiled and the initial QIR is generated and save in the file `analysis-example.ll`, we execute the command +The corresponding QIR can be generated by going to `examples/QubitAllocationAnalysis` and run `make analysis-example.ll`. This step requires that you have a working installation of Q#. Once compiled and the initial QIR is generated and saved in the file `analysis-example.ll`, we execute the command ``` -./Source/Apps/qat --generate --profile baseProfile ./analysis-example.ll +./Source/Apps/qat --generate --profile baseProfile -S ./analysis-example.ll ``` The QAT tool will now attempt to map the QIR in `analysis-example.ll` into a QIR which is compatible with the base format. 
Removing type and function declarations, the correspoding code reads: @@ -149,7 +149,7 @@ TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit: ; preds = % ``` -We note the absence of loops, and that quantum registers are "allocated" at compile time meaning that each qubit instance is assigned a unique ID. As some code may be dead and optimised away, the qubit allocation is not garantueed to be sequential at this point in time. Future work will include writing a qubit ID remapper which will allow qubit IDs to become strictly increasing with no gaps inbetween. +We note the absence of loops, and that qubit registers are "allocated" at compile time meaning that each qubit instance is assigned a unique ID. As some code may be dead and optimised away, the qubit allocation is not guaranteed to be sequential at this point in time. Future work will include writing a qubit ID remapper which will allow qubit IDs to become strictly increasing with no gaps in between. We also note that the function `TeleportChain__TeleportQubitUsingPresharedEntanglement__body` was cloned twice. This is due to the allocation of qubits and the function being called twice. At present, the analyser does not take qubit release into account and just assumes that it will never be released due to the complicated nature for dealing with nested functions at compile time. @@ -157,17 +157,18 @@ We also note that the function `TeleportChain__TeleportQubitUsingPresharedEntang This library is written in C++ and depends on: -- LLVM +- LLVM 11 Additional development dependencies include: - CMake - clang-format - clang-tidy +- Python 3 -## Building the passes +## Configuring the build directory -To build the passes, create a new build directory and switch to that directory: +To build the tool, create a new build directory and switch to that directory: ```sh mkdir Debug
Other valid targets are the name of the folders in `libs/` found in the passes root. -# Profile adoption tool - ## Building QAT First @@ -205,7 +204,11 @@ then ./Source/Apps/qat ``` -## Implementing a profile pass +# Brief developer guide + +## Extending the profile logic + +### Implementing a profile transformation pass As an example of how one can implement a new profile pass, we here show the implementational details of our example pass which allows mapping the teleportation code to the base profile: @@ -242,7 +245,7 @@ As an example of how one can implement a new profile pass, we here show the impl Transformations of the IR will happen on the basis of what rules are added to the rule set. The purpose of the factory is to make easy to add rules that serve a single purpose as well as making a basis for making rules unit testable. -## Implementing new rules +### Implementing new rules Implementing new rules consists of two steps: Defining a pattern that one wish to replace and implementing the corresponding replacement logic. Inside a factory member function, this look as follows: @@ -298,21 +301,7 @@ After a positive match is found, the lead instruction alongside a IRBuilder, a c }; ``` -# Passes - -## Running a pass - -You can run a pass using [opt](https://llvm.org/docs/CommandGuide/opt.html) as follows: - -```sh -cd examples/ClassicalIrCommandline -make emit-llvm-bc -opt -load-pass-plugin ../../{Debug,Release}/libOpsCounter.{dylib,so} --passes="print" -disable-output classical-program.bc -``` - -For a detailed tutorial, see examples. 
- -## Creating a new pass +## Implementing a new pass To make it easy to create a new pass, we have created a few templates to get you started quickly: @@ -366,7 +355,19 @@ Implement your pass here: bar Implement your pass here: main ``` -## CI +## Running a pass + +You can run a pass using [opt](https://llvm.org/docs/CommandGuide/opt.html) as follows: + +```sh +cd examples/ClassicalIrCommandline +make emit-llvm-bc +opt -load-pass-plugin ../../{Debug,Release}/libOpsCounter.{dylib,so} --passes="print" -disable-output classical-program.bc +``` + +For a detailed tutorial, see examples. + +# CI Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, unit test passes and that there are no erros found by the static analyser. @@ -409,6 +410,158 @@ You can run all processes by running: As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended that you use a docker image to perform these steps. TODO(TFR): The docker image is not added yet and this will be documented in the future. +# Introduction to passes + +Amongst other things, this library defines [LLVM passes](https://llvm.org/docs/Passes.html) used for analysing, optimising and transforming the IR. The QIR pass library is a dynamic library that can be compiled and run separately from the +rest of the project code. + +## What do LLVM passes do? + +Before getting started, we here provide a few examples of classical use cases for [LLVM passes](https://llvm.org/docs/Passes.html). You find additional [instructive examples here][1]. + +**Example 1: Transformation**. As a first example of what [LLVM passes](https://llvm.org/docs/Passes.html) can do, we look at optimisation.
Consider a compiler which +compiles + +```c +double test(double x) { + return (1+2+x)*(x+(1+2)); +} +``` + +into the following IR: + +``` +define double @test(double %x) { +entry: + %addtmp = fadd double 3.000000e+00, %x + %addtmp1 = fadd double %x, 3.000000e+00 + %multmp = fmul double %addtmp, %addtmp1 + ret double %multmp +} +``` + +This code is obviously inefficient as we could get rid of one operation by rewriting the code to: + +```c +double test(double x) { + double y = 3+x; + return y * y; +} +``` + +One purpose of [LLVM passes](https://llvm.org/docs/Passes.html) is to allow automatic transformation from the above IR to the IR: + +``` +define double @test(double %x) { +entry: + %addtmp = fadd double %x, 3.000000e+00 + %multmp = fmul double %addtmp, %addtmp + ret double %multmp +} +``` + +**Example 2: Analytics**. Another example of useful passes are those generating and collecting statistics about the program. For instance, one analytics pass that +makes sense for classical programs is to count instructions used to implement functions.
Take the C program: + +```c +int foo(int x) +{ + return x; +} + +void bar(int x, int y) +{ + foo(x + y); +} + +int main() +{ + foo(2); + bar(3, 2); + + return 0; +} +``` + +which produces the following IR (without optimisation): + +``` +define dso_local i32 @foo(i32 %0) #0 { + %2 = alloca i32, align 4 + store i32 %0, i32* %2, align 4 + %3 = load i32, i32* %2, align 4 + ret i32 %3 +} + +define dso_local void @bar(i32 %0, i32 %1) #0 { + %3 = alloca i32, align 4 + %4 = alloca i32, align 4 + store i32 %0, i32* %3, align 4 + store i32 %1, i32* %4, align 4 + %5 = load i32, i32* %3, align 4 + %6 = load i32, i32* %4, align 4 + %7 = add nsw i32 %5, %6 + %8 = call i32 @foo(i32 %7) + ret void +} + +define dso_local i32 @main() #0 { + %1 = alloca i32, align 4 + store i32 0, i32* %1, align 4 + %2 = call i32 @foo(i32 2) + call void @bar(i32 3, i32 2) + ret i32 0 +} +``` + +A stat pass for this code would collect the following statistics: + +```text +Stats for 'foo' +=========================== +Opcode # Used +--------------------------- +load 1 +ret 1 +alloca 1 +store 1 +--------------------------- + +Stats for 'bar' +=========================== +Opcode # Used +--------------------------- +load 2 +add 1 +ret 1 +alloca 2 +store 2 +call 1 +--------------------------- + +Stats for 'main' +=========================== +Opcode # Used +--------------------------- +ret 1 +alloca 1 +store 1 +call 2 +--------------------------- +``` + +**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on [static arrays][2]. +Note that this is a non-standard use case as such analysis is usually made using the AST rather than at the IR level. + +**References** + +- [1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass +- [2] https://github.com/victor-fdez/llvm-array-check-pass + +## Out-of-source Pass + +This library is built as a set of out-of-source passes.
All this means is that we will not be downloading the LLVM repository and modifying this repository directly. You can read more [here](https://llvm.org/docs/CMake.html#cmake-out-of-source-pass). + # Developer FAQ ## Pass does not load From ba69f979ecdb1e90d30e0fa567974da9822fa3bf Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 16 Aug 2021 17:13:19 +0200 Subject: [PATCH 106/106] Fixing broken link --- src/Passes/docs/base-profile-transformations.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Passes/docs/base-profile-transformations.md b/src/Passes/docs/base-profile-transformations.md index 681046c32b..eb113300ed 100644 --- a/src/Passes/docs/base-profile-transformations.md +++ b/src/Passes/docs/base-profile-transformations.md @@ -106,7 +106,7 @@ exit__1: ; preds = %header__1 } ``` -After applying the our [demo profile transformation](https://github.com/troelsfr/qsharp-compiler/tree/feature/profile-pass/src/Passes/examples/QirAllocationAnalysis), the QIR is reduced to: +After applying our demo profile transformation, the QIR is reduced to: ``` define void @Feasibility__QubitMapping__Interop() local_unnamed_addr #0 {