From 3c76e083557b7bf50922160ecda589554f30824a Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 20 Jul 2021 09:56:56 +0200 Subject: [PATCH 01/48] Initial proposal for a QsPasses structure --- src/QsPasses/CMakeLists.txt | 28 ++++++++++++++++++++++++++++ src/QsPasses/Makefile | 3 +++ src/QsPasses/README.md | 19 +++++++++++++++++++ src/QsPasses/docs/index.md | 3 +++ 4 files changed, 53 insertions(+) create mode 100644 src/QsPasses/CMakeLists.txt create mode 100644 src/QsPasses/Makefile create mode 100644 src/QsPasses/README.md create mode 100644 src/QsPasses/docs/index.md diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt new file mode 100644 index 0000000000..98d45c7ffa --- /dev/null +++ b/src/QsPasses/CMakeLists.txt @@ -0,0 +1,28 @@ +cmake_minimum_required(VERSION 3.4.3) + +project(QSharpPasses) + +find_package(LLVM REQUIRED CONFIG) + +message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") +message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wconversion -Wpedantic") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") + +include_directories(${LLVM_INCLUDE_DIRS}) +add_definitions(${LLVM_DEFINITIONS}) +include_directories(${CMAKE_SOURCE_DIR}/src) + +# LLVM uses RTTI by default - added here for consistency +if(NOT LLVM_ENABLE_RTTI) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") +endif() + +# The main libary +add_library(QSharpPasses SHARED src/GateCounter/GateCounter.cpp) +target_link_libraries(QSharpPasses + "$<$:-undefined dynamic_lookup>") diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile new file mode 100644 index 0000000000..270dacab28 --- /dev/null +++ b/src/QsPasses/Makefile @@ -0,0 +1,3 @@ +clean: + rm -rf Release/ + rm -rf Debug/ \ No newline at end of file diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md new file mode 100644 index 0000000000..ce58881145 --- /dev/null +++ b/src/QsPasses/README.md @@ -0,0 +1,19 @@ +# Q# Passes for LLVM + +This subcomponent defines LLVM passes used for optimising and transforming the IR. + +## Getting started + +The Q# pass component is a dynamic library that can be compiled and ran separately from the +rest of the project code. + +## Dependencies + +This subcomponent is written in C++ and depends on: + +- LLVM +- + +## Building the passes + +To build the diff --git a/src/QsPasses/docs/index.md b/src/QsPasses/docs/index.md new file mode 100644 index 0000000000..08c1bfc9b7 --- /dev/null +++ b/src/QsPasses/docs/index.md @@ -0,0 +1,3 @@ +# Q# pass documentation + +This directory and file is a placeholder for describing LLVM passes which was already implemented. From 9eb5c027e5af82681ee7f4c8fbda35dcfeb73331 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 20 Jul 2021 09:57:27 +0200 Subject: [PATCH 02/48] Updating CMake --- src/QsPasses/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 98d45c7ffa..eb5eb53301 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -7,12 +7,14 @@ find_package(LLVM REQUIRED CONFIG) message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") +# Setting the standard for set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wconversion -Wpedantic") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") + include_directories(${LLVM_INCLUDE_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/src) From 6674e248dd6e963045c290826a9d6bd922219305 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 20 Jul 2021 12:43:18 +0200 Subject: [PATCH 03/48] Adding CI stuff --- src/QsPasses/.clang-format | 57 +++++++ src/QsPasses/.clang-tidy | 22 +++ src/QsPasses/Makefile | 14 +- src/QsPasses/README.md | 62 ++++++- src/QsPasses/scripts/Builder/__init__.py | 0 src/QsPasses/scripts/FormatSource/__init__.py | 152 ++++++++++++++++++ src/QsPasses/scripts/ToolChain/__init__.py | 0 src/QsPasses/src/GateCounter/GateCounter.cpp | 64 ++++++++ src/QsPasses/src/Llvm.hpp | 37 +++++ 9 files changed, 401 insertions(+), 7 deletions(-) create mode 100644 src/QsPasses/.clang-format create mode 100644 src/QsPasses/.clang-tidy create mode 100644 src/QsPasses/scripts/Builder/__init__.py create mode 100644 src/QsPasses/scripts/FormatSource/__init__.py create mode 100644 src/QsPasses/scripts/ToolChain/__init__.py create mode 100644 src/QsPasses/src/GateCounter/GateCounter.cpp create mode 100644 src/QsPasses/src/Llvm.hpp diff --git a/src/QsPasses/.clang-format b/src/QsPasses/.clang-format new file mode 100644 index 0000000000..f44b5289b3 --- /dev/null +++ b/src/QsPasses/.clang-format @@ -0,0 +1,57 @@ +--- +BasedOnStyle: Google +Language: Cpp +AccessModifierOffset: -2 +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: true +AlignTrailingComments: true +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +BraceWrapping: + AfterClass: true + AfterControlStatement: true + AfterEnum: true + AfterFunction: true + AfterNamespace: false + AfterStruct: true + AfterUnion: true + AfterExternBlock: true + BeforeCatch: true + BeforeElse: true + SplitEmptyFunction: false +BreakBeforeBraces: Custom +BreakConstructorInitializers: BeforeComma +ColumnLimit: 100 +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 2 +ContinuationIndentWidth: 4 +DerivePointerAlignment: false +IncludeBlocks: Regroup +IncludeCategories: + - Regex: '.*\.\..*' + Priority: 1 + - Regex: '^<.*\.h.*>$' + Priority: 5 + - Regex: '^<.*>$' + Priority: 6 + - Regex: '^"(gtest)|(gmock)|(benchmark)/.*"$' + Priority: 4 + - Regex: '.*/.*' + Priority: 3 + - Regex: '.*' + Priority: 2 +IncludeIsMainRegex: '' +IndentCaseLabels: false +IndentWidth: 2 +KeepEmptyLinesAtTheStartOfBlocks: true +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +PointerAlignment: Right +SortIncludes: true +SortUsingDeclarations: true +SpaceInEmptyParentheses: false +SpacesInAngles: false +Standard: Cpp11 +UseTab: Never diff --git a/src/QsPasses/.clang-tidy 
b/src/QsPasses/.clang-tidy new file mode 100644 index 0000000000..f86dcb3ccf --- /dev/null +++ b/src/QsPasses/.clang-tidy @@ -0,0 +1,22 @@ +Checks: "-*,\ +bugprone-*,\ +cert-dcl*,\ +cert-env*,\ +cert-err52-cpp,\ +cert-err60-cpp,\ +cert-flp30-c,\ +clang-analyzer-security.FloatLoopCounter,\ +google-build-explicit-make-pair,\ +google-build-namespaces,\ +google-explicit-constructor,\ +google-readability-*,\ +google-runtime-operator,\ +hicpp-exception-baseclass,\ +hicpp-explicit-conversions,\ +hicpp-use-*,\ +misc-*,\ +-misc-misplaced-widening-cast,\ +modernize-*,\ +performance-*,\ +readability-*,\ +-readability-identifier-naming" \ No newline at end of file diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile index 270dacab28..b5709e0fa4 100644 --- a/src/QsPasses/Makefile +++ b/src/QsPasses/Makefile @@ -1,3 +1,13 @@ +stylecheck: + @python scripts/FormatSource/__init__.py + +lint: + @echo "Static analysis not added yet" + +tests: + mkdir Debug + cd Debug && cmake .. && make -j4 && ctest + clean: - rm -rf Release/ - rm -rf Debug/ \ No newline at end of file + rm -rf Release/ + rm -rf Debug/ diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index ce58881145..85a3d5e1a3 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -1,19 +1,71 @@ # Q# Passes for LLVM -This subcomponent defines LLVM passes used for optimising and transforming the IR. +This library defines LLVM passes used for optimising and transforming the IR. ## Getting started -The Q# pass component is a dynamic library that can be compiled and ran separately from the +The Q# pass library is a dynamic library that can be compiled and ran separately from the rest of the project code. ## Dependencies -This subcomponent is written in C++ and depends on: +This library is written in C++ and depends on: - LLVM -- + +Additional development dependencies include: + +- CMake +- clang-format +- clang-tidy ## Building the passes -To build the +To build the passes, create a new build directory and switch to that directory: + +```sh +mkdir Debug +cd Debug/ +``` + +To build the library, first configure CMake from the build directory + +```sh +cmake .. +``` + +and then make your target + +```sh +make [target] +``` + +## Running a pass + +Yet to be written + +## CI + +Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, +unit test passes and that there are no erros found by the static analyser. + +To check the style, run + +```sh +make stylecheck +``` + +To test that the code compiles and tests passes run + +```sh +make tests +``` + +Finally, to analyse the code, run + +```sh +make lint +``` + +As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended +that you use a docker image to perform these steps. diff --git a/src/QsPasses/scripts/Builder/__init__.py b/src/QsPasses/scripts/Builder/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/QsPasses/scripts/FormatSource/__init__.py b/src/QsPasses/scripts/FormatSource/__init__.py new file mode 100644 index 0000000000..f8f6e3929e --- /dev/null +++ b/src/QsPasses/scripts/FormatSource/__init__.py @@ -0,0 +1,152 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +from os import path +import os +import logging +import subprocess + + +def discover_formatter(): + # TODO(TFR): Auto discover, use full path + return "clang-format" + + +logger = logging.getLogger() +PROJECT_ROOT = path.abspath(path.dirname(path.dirname(path.dirname(__file__)))) +CLANG_FORMAT_EXE = discover_formatter() + +####### +# Style pipeline components + + +def require_token(token, filename, contents, cursor, dry_run): + failed = False + if not contents[cursor:].startswith(token): + logger.error("{}: File must have {} at position {}".format(filename, token, cursor)) + failed = True + return cursor + len(token), failed + + +def require_pragma_once(filename, contents, cursor, dry_run): + return require_token("#pragma once\n", filename, contents, cursor, dry_run) + + +def require_todo_owner(filename, contents, cursor, dry_run): + # TODO(tfr): implement + return cursor, False + + +def enforce_cpp_license(filename, contents, cursor, dry_run): + return require_token("""// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +""", filename, contents, cursor, dry_run) + + +def enforce_py_license(filename, contents, cursor, dry_run): + # Allowing empty files + if contents.strip() == "": + return cursor, False + + return require_token("""# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +""", filename, contents, cursor, dry_run) + + +def enforce_formatting(filename, contents, cursor, dry_run): + p = subprocess.Popen( + [CLANG_FORMAT_EXE, '-style=file'], + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + cwd=PROJECT_ROOT) + output = p.communicate(input=contents.encode())[0] + + if p.returncode != 0: + raise Exception('Could not format contents') + + formatted = output.decode('utf-8') + if formatted != contents: + logger.error("{} was not correctly formatted.".format(filename)) + + return cursor, False + + +####### +# Source pipeline definitions + + +AUTO_FORMAT_LANGUAGES = [ + { + "name": "C++ Main", + "src": path.join(PROJECT_ROOT, "src"), + + "pipelines": { + "hpp": [ + require_pragma_once, + enforce_cpp_license, + enforce_formatting + ], + "cpp": [ + enforce_cpp_license, + enforce_formatting + ] + } + }, + { + "name": "Scripts", + "src": path.join(PROJECT_ROOT, "scripts"), + + "pipelines": { + "py": [ + enforce_py_license, + ], + } + } +] + + +def execute_pipeline(pipeline, filename: str, dry_run: bool): + logger.info("Executing pipeline for {}".format(filename)) + cursor = 0 + + with open(filename, "r") as fb: + contents = fb.read() + + failed = False + for fnc in pipeline: + cursor, f = fnc(filename, contents, cursor, dry_run) + failed = failed or f + + return failed + + +def main(dry_run: bool = True): + failed = False + + for language in AUTO_FORMAT_LANGUAGES: + logger.info("Formatting {}".format(language["name"])) + basedir = language["src"] + pipelines = language["pipelines"] + + for root, dirs, files in os.walk(basedir): + + for filename in files: + if "." 
not in filename: + continue + + _, ext = filename.rsplit(".", 1) + if ext in pipelines: + f = execute_pipeline(pipelines[ext], path.join(root, filename), dry_run) + failed = failed or f + + if failed: + logger.error("Your code did not pass formatting.") + + return failed + + +if __name__ == "__main__": + if main(): + exit(-1) diff --git a/src/QsPasses/scripts/ToolChain/__init__.py b/src/QsPasses/scripts/ToolChain/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/QsPasses/src/GateCounter/GateCounter.cpp b/src/QsPasses/src/GateCounter/GateCounter.cpp new file mode 100644 index 0000000000..540a85e626 --- /dev/null +++ b/src/QsPasses/src/GateCounter/GateCounter.cpp @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include "Llvm.hpp" + +using namespace llvm; + +namespace { + +void visitor(Function &F) +{ + errs() << "(gate-counter) " << F.getName() << "\n"; + errs() << "(gate-counter) number of arguments: " << F.arg_size() << "\n"; +} + +struct GateCounterPass : PassInfoMixin +{ + PreservedAnalyses run(Function &F, FunctionAnalysisManager &) + { + visitor(F); + + return PreservedAnalyses::all(); + } +}; + +struct LegacyGateCounterPass : public FunctionPass +{ + static char ID; + LegacyGateCounterPass() + : FunctionPass(ID) + {} + + bool runOnFunction(Function &F) override + { + visitor(F); + return false; + } +}; +} // namespace + +llvm::PassPluginLibraryInfo getGateCounterPluginInfo() +{ + return {LLVM_PLUGIN_API_VERSION, "GateCounter", LLVM_VERSION_STRING, [](PassBuilder &PB) { + PB.registerPipelineParsingCallback([](StringRef Name, FunctionPassManager &FPM, + ArrayRef) { + if (Name == "gate-counter") + { + FPM.addPass(GateCounterPass()); + return true; + } + return false; + }); + }}; +} + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return getGateCounterPluginInfo(); +} + +char LegacyGateCounterPass::ID = 0; +static RegisterPass LegacyGateCounterRegistration("legacy-gate-counter", + "Gate Counter Pass", true, + false); diff --git a/src/QsPasses/src/Llvm.hpp b/src/QsPasses/src/Llvm.hpp new file mode 100644 index 0000000000..408ad61cdf --- /dev/null +++ b/src/QsPasses/src/Llvm.hpp @@ -0,0 +1,37 @@ +#pragma once +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
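+// Convenience wrapper around the LLVM headers used by the passes. The
+// diagnostic pragmas below silence warnings that originate inside LLVM's own
+// headers, so this project can keep building with -Werror (see CMakeLists.txt).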
+ +#if defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wpedantic" +#pragma GCC diagnostic ignored "-Wunused-value" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wunknown-warning-option" +#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wconversion" +#pragma clang diagnostic ignored "-Wpedantic" +#pragma clang diagnostic ignored "-Werror" +#pragma clang diagnostic ignored "-Wshadow" +#pragma clang diagnostic ignored "-Wreturn-std-move" +#pragma clang diagnostic ignored "-Wunknown-warning-option" +#pragma clang diagnostic ignored "-Wunused-parameter" +#endif + +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Passes/PassPlugin.h" +#include "llvm/Support/raw_ostream.h" + +#if defined(__clang__) +#pragma clang diagnostic pop +#endif + +#if defined(__GNUC__) +#pragma GCC diagnostic pop +#endif \ No newline at end of file From 199eed46f09e14fb014b9596cfa9ede41fea1c32 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 20 Jul 2021 14:34:20 +0200 Subject: [PATCH 04/48] Making CLI interface for CI tasks --- src/QsPasses/Makefile | 7 +- src/QsPasses/README.md | 11 +++ src/QsPasses/develop.env | 3 + src/QsPasses/requirements.txt | 1 + src/QsPasses/scripts/Builder/__init__.py | 0 src/QsPasses/scripts/ToolChain/__init__.py | 0 .../site-packages/PassCI/Builder/__init__.py | 73 +++++++++++++++++++ .../PassCI}/FormatSource/__init__.py | 10 +-- src/QsPasses/site-packages/PassCI/Project.py | 13 ++++ .../PassCI/ToolChain/__init__.py | 8 ++ src/QsPasses/site-packages/PassCI/__main__.py | 68 +++++++++++++++++ 11 files changed, 184 insertions(+), 10 deletions(-) create mode 100644 src/QsPasses/develop.env create mode 100644 src/QsPasses/requirements.txt delete mode 100644 src/QsPasses/scripts/Builder/__init__.py delete mode 100644 src/QsPasses/scripts/ToolChain/__init__.py create mode 100644 src/QsPasses/site-packages/PassCI/Builder/__init__.py rename src/QsPasses/{scripts => site-packages/PassCI}/FormatSource/__init__.py (93%) create mode 100644 src/QsPasses/site-packages/PassCI/Project.py create mode 100644 src/QsPasses/site-packages/PassCI/ToolChain/__init__.py create mode 100644 src/QsPasses/site-packages/PassCI/__main__.py diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile index b5709e0fa4..324c136291 100644 --- a/src/QsPasses/Makefile +++ b/src/QsPasses/Makefile @@ -1,12 +1,13 @@ stylecheck: - @python scripts/FormatSource/__init__.py + @source develop.env && python -m PassCI --stylecheck + + lint: @echo "Static analysis not added yet" tests: - mkdir Debug - cd Debug && cmake .. && make -j4 && ctest + @source develop.env && python -m PassCI test clean: rm -rf Release/ diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 85a3d5e1a3..65f9ef8206 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -49,6 +49,17 @@ Yet to be written Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, unit test passes and that there are no erros found by the static analyser. 
+To setup the CI environment, run following commands + +```sh +source develop.env +virtualenv develop__venv +source develop__venv/bin/activate +pip install -r requirements.txt +``` + +These adds the necessary environment variables to ensure that you have the `PassCI` package and all required dependencies. + To check the style, run ```sh diff --git a/src/QsPasses/develop.env b/src/QsPasses/develop.env new file mode 100644 index 0000000000..002d66b4ee --- /dev/null +++ b/src/QsPasses/develop.env @@ -0,0 +1,3 @@ +#!/bin/sh + +export PYTHONPATH=$PYTHONPATH:$PWD/site-packages \ No newline at end of file diff --git a/src/QsPasses/requirements.txt b/src/QsPasses/requirements.txt new file mode 100644 index 0000000000..77c1d85ae8 --- /dev/null +++ b/src/QsPasses/requirements.txt @@ -0,0 +1 @@ +click==8.0.1 diff --git a/src/QsPasses/scripts/Builder/__init__.py b/src/QsPasses/scripts/Builder/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/QsPasses/scripts/ToolChain/__init__.py b/src/QsPasses/scripts/ToolChain/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/QsPasses/site-packages/PassCI/Builder/__init__.py b/src/QsPasses/site-packages/PassCI/Builder/__init__.py new file mode 100644 index 0000000000..9b77c7035d --- /dev/null +++ b/src/QsPasses/site-packages/PassCI/Builder/__init__.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +from .. import Project +from ..Project import PROJECT_ROOT +import logging +import subprocess +import sys + +logger = logging.getLogger() + + +def configure_cmake(build_dir: str, generator=None): + + logger.info("Source: {}".format(PROJECT_ROOT)) + logger.info("Build : {}".format(build_dir)) + + os.chdir(PROJECT_ROOT) + os.makedirs(build_dir, exist_ok=True) + + cmake_cmd = ['cmake'] # TODO: get from toolchain + + if generator is not None: + cmake_cmd += ['-G', generator] + + cmake_cmd += [PROJECT_ROOT] + + exit_code = subprocess.call(cmake_cmd, cwd=build_dir) + if exit_code != 0: + logger.error('Failed to configure project') + sys.exit(exit_code) + + +def build_project(build_dir: str, generator=None, concurrency=None): + + if generator in ["make", None]: + cmd = ["make"] + elif generator in ["ninja"]: + cmd = ["ninja"] + + if concurrency is None: + concurrency = Project.get_concurrency() + + cmd.append('-j{}'.format(concurrency)) + + exit_code = subprocess.call(cmd, cwd=build_dir) + + if exit_code != 0: + logger.error('Failed to make the project') + sys.exit(exit_code) + + +def run_tests(build_dir: str, concurrency=None): + cmake_cmd = ['ctest'] # TODO: get from toolchain + + if concurrency is not None: + raise BaseException("No support for concurrent testing at the moment.") + + exit_code = subprocess.call(cmake_cmd, cwd=build_dir) + if exit_code != 0: + logger.error('Failed to configure project') + sys.exit(exit_code) + + +def main(build_dir: str, generator=None, test: bool = False): + + configure_cmake(build_dir, generator) + + build_project(build_dir, generator) + + if test: + run_tests(build_dir) diff --git a/src/QsPasses/scripts/FormatSource/__init__.py b/src/QsPasses/site-packages/PassCI/FormatSource/__init__.py similarity index 93% rename from src/QsPasses/scripts/FormatSource/__init__.py rename to src/QsPasses/site-packages/PassCI/FormatSource/__init__.py index f8f6e3929e..299a53cb70 100644 --- a/src/QsPasses/scripts/FormatSource/__init__.py +++ b/src/QsPasses/site-packages/PassCI/FormatSource/__init__.py @@ -6,14 +6,10 
@@ import logging import subprocess - -def discover_formatter(): - # TODO(TFR): Auto discover, use full path - return "clang-format" - +from ..Project import PROJECT_ROOT +from ..ToolChain import discover_formatter logger = logging.getLogger() -PROJECT_ROOT = path.abspath(path.dirname(path.dirname(path.dirname(__file__)))) CLANG_FORMAT_EXE = discover_formatter() ####### @@ -96,7 +92,7 @@ def enforce_formatting(filename, contents, cursor, dry_run): }, { "name": "Scripts", - "src": path.join(PROJECT_ROOT, "scripts"), + "src": path.join(PROJECT_ROOT, "site-packages"), "pipelines": { "py": [ diff --git a/src/QsPasses/site-packages/PassCI/Project.py b/src/QsPasses/site-packages/PassCI/Project.py new file mode 100644 index 0000000000..ef4ba177cd --- /dev/null +++ b/src/QsPasses/site-packages/PassCI/Project.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from os import path +import multiprocessing + +PROJECT_ROOT = path.abspath(path.dirname(path.dirname(path.dirname(__file__)))) + +MAX_CONCURRENCY = 7 + + +def get_concurrency(): + return min(MAX_CONCURRENCY, multiprocessing.cpu_count()) diff --git a/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py b/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py new file mode 100644 index 0000000000..7cb807a69b --- /dev/null +++ b/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import shutil + + +def discover_formatter(): + return shutil.which("clang-format") diff --git a/src/QsPasses/site-packages/PassCI/__main__.py b/src/QsPasses/site-packages/PassCI/__main__.py new file mode 100644 index 0000000000..9a9e74fc01 --- /dev/null +++ b/src/QsPasses/site-packages/PassCI/__main__.py @@ -0,0 +1,68 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +from .FormatSource import main as style_check_main +from .Builder import main as builder_main + +import click +import logging +import sys + +logger = logging.getLogger() + +# Logging configuration +ch = logging.StreamHandler() +ch.setLevel(logging.DEBUG) +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +logger.addHandler(ch) + +# By default we only log errors +logger.setLevel(logging.ERROR) + + +@click.group() +@click.option('--loglevel', default="error") +def cli(loglevel): + levels = { + "critical": 50, + "error": 40, + "warning": 30, + "info": 20, + "debug": 10, + "notset": 0 + } + + loglevel = loglevel.lower() + if loglevel not in levels: + logger.critical("Invalid log level") + sys.exit(-1) + + logger.setLevel(levels[loglevel]) + logger.info("Loglevel set to {}".format(loglevel)) + + +@cli.command() +@click.option('--fix-issues/--no-fix-issues', default=False) +def style_check(fix_issues): + logger.info("Invoking style checker") + + style_check_main() + + +@cli.command() +@click.option('--debug/--no-debug', default=True) +@click.option('--generator', default=None) +def test(debug, generator): + logger.info("Building and testing") + + build_dir = "Debug" + if not debug: + build_dir = "Release" + + builder_main(build_dir, generator, True) + + +if __name__ == '__main__': + cli() From de323ef36321ce600e7b638d6239f64669143412 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 11:28:59 +0200 Subject: [PATCH 05/48] Finishing V1 of CI script with updated clang tidy and format --- src/QsPasses/.clang-format | 81 ++++++------- src/QsPasses/.clang-tidy | 38 +++++- src/QsPasses/CMakeLists.txt | 2 + src/QsPasses/Makefile | 12 +- src/QsPasses/README.md | 6 +- .../{PassCI => TasksCI}/Builder/__init__.py | 0 .../FormatSource/__init__.py | 44 ++++--- .../site-packages/TasksCI/Linting/__init__.py | 110 ++++++++++++++++++ .../{PassCI => TasksCI}/Project.py | 0 .../{PassCI => TasksCI}/ToolChain/__init__.py | 4 + .../{PassCI => TasksCI}/__main__.py | 35 +++++- src/QsPasses/src/GateCounter/GateCounter.cpp | 81 +++++++------ 12 files changed, 304 insertions(+), 109 deletions(-) rename src/QsPasses/site-packages/{PassCI => TasksCI}/Builder/__init__.py (100%) rename src/QsPasses/site-packages/{PassCI => TasksCI}/FormatSource/__init__.py (73%) create mode 100644 src/QsPasses/site-packages/TasksCI/Linting/__init__.py rename src/QsPasses/site-packages/{PassCI => TasksCI}/Project.py (100%) rename src/QsPasses/site-packages/{PassCI => TasksCI}/ToolChain/__init__.py (74%) rename src/QsPasses/site-packages/{PassCI => TasksCI}/__main__.py (56%) diff --git a/src/QsPasses/.clang-format b/src/QsPasses/.clang-format index f44b5289b3..329e8f956d 100644 --- a/src/QsPasses/.clang-format +++ b/src/QsPasses/.clang-format @@ -1,57 +1,60 @@ +# https://clang.llvm.org/docs/ClangFormatStyleOptions.html + --- -BasedOnStyle: Google Language: Cpp +BasedOnStyle: Microsoft + +# page width +ColumnLimit: 120 +ReflowComments: true + +# tabs and indents +UseTab: Never +IndentWidth: 4 +TabWidth: 4 AccessModifierOffset: -2 +NamespaceIndentation: Inner + +# line and statements layout +BreakBeforeBraces: Allman +BinPackParameters: false +AlignAfterOpenBracket: AlwaysBreak +AllowShortIfStatementsOnASingleLine: WithoutElse +AllowShortFunctionsOnASingleLine: Empty +AllowAllConstructorInitializersOnNextLine: false +AllowAllArgumentsOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: false +BreakBeforeTernaryOperators: true +BreakConstructorInitializers: BeforeComma + +# misc +Cpp11BracedListStyle: true +FixNamespaceComments: true +IncludeBlocks: Preserve +SpaceBeforeInheritanceColon : true +SpaceBeforeParens: ControlStatements +DerivePointerAlignment: false +PointerAlignment: Left + +# Suggestion +Standard: Cpp11 AlignConsecutiveAssignments: true AlignConsecutiveDeclarations: true AlignTrailingComments: true -AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: None -AllowShortIfStatementsOnASingleLine: false -AllowShortLoopsOnASingleLine: false -BraceWrapping: - AfterClass: true - AfterControlStatement: true - AfterEnum: true - AfterFunction: true - AfterNamespace: false - AfterStruct: true - AfterUnion: true - AfterExternBlock: true - BeforeCatch: true - BeforeElse: true - SplitEmptyFunction: false -BreakBeforeBraces: Custom -BreakConstructorInitializers: BeforeComma -ColumnLimit: 100 -ConstructorInitializerAllOnOneLineOrOnePerLine: false -ConstructorInitializerIndentWidth: 2 -ContinuationIndentWidth: 4 -DerivePointerAlignment: false -IncludeBlocks: Regroup -IncludeCategories: + +# Ensures include compleness +IncludeCategories: - Regex: '.*\.\..*' Priority: 1 - Regex: '^<.*\.h.*>$' Priority: 5 - Regex: '^<.*>$' Priority: 6 - - Regex: '^"(gtest)|(gmock)|(benchmark)/.*"$' + - Regex: '^"(llvm)/.*"$' Priority: 4 - Regex: '.*/.*' Priority: 3 - Regex: '.*' Priority: 2 IncludeIsMainRegex: '' -IndentCaseLabels: false -IndentWidth: 2 
-KeepEmptyLinesAtTheStartOfBlocks: true -MaxEmptyLinesToKeep: 1 -NamespaceIndentation: None -PointerAlignment: Right -SortIncludes: true -SortUsingDeclarations: true -SpaceInEmptyParentheses: false -SpacesInAngles: false -Standard: Cpp11 -UseTab: Never + diff --git a/src/QsPasses/.clang-tidy b/src/QsPasses/.clang-tidy index f86dcb3ccf..d1f58c04c2 100644 --- a/src/QsPasses/.clang-tidy +++ b/src/QsPasses/.clang-tidy @@ -1,5 +1,7 @@ -Checks: "-*,\ -bugprone-*,\ +Checks: "-*,bugprone-*,\ +-readability-*,\ +readability-identifier-*,\ +readability-braces-around-statements,\ cert-dcl*,\ cert-env*,\ cert-err52-cpp,\ @@ -16,7 +18,31 @@ hicpp-explicit-conversions,\ hicpp-use-*,\ misc-*,\ -misc-misplaced-widening-cast,\ -modernize-*,\ -performance-*,\ -readability-*,\ --readability-identifier-naming" \ No newline at end of file +performance-*" + +WarningsAsErrors: '*' +HeaderFilterRegex: '.*' + +CheckOptions: + - key: readability-identifier-naming.ClassCase + value: 'CamelCase' + - key: readability-identifier-naming.ClassPrefix + value: 'C' + - key: readability-identifier-naming.AbstractClassPrefix + value: 'I' + - key: readability-identifier-naming.StructCase + value: 'CamelCase' + - key: readability-identifier-naming.ParameterCase + value: 'camelBack' + - key: readability-identifier-naming.PrivateMemberCase + value: 'camelBack' + - key: readability-identifier-naming.LocalVariableCase + value: 'camelBack' + - key: readability-identifier-naming.TypeAliasCase + value: 'CamelCase' + - key: readability-identifier-naming.UnionCase + value: 'CamelCase' + - key: readability-identifier-naming.FunctionCase + value: 'CamelCase' + - key: readability-identifier-naming.NamespaceCase + value: 'CamelCase' diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index eb5eb53301..4135da0dd6 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -14,6 +14,8 @@ set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wconversion -Wpedantic") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") +# Needed for clang-tidy +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) include_directories(${LLVM_INCLUDE_DIRS}) add_definitions(${LLVM_DEFINITIONS}) diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile index 324c136291..0a281789aa 100644 --- a/src/QsPasses/Makefile +++ b/src/QsPasses/Makefile @@ -1,14 +1,18 @@ stylecheck: - @source develop.env && python -m PassCI --stylecheck - + @source develop.env && python -m TasksCI stylecheck +fixstyle: + @source develop.env && python -m TasksCI --loglevel warning stylecheck --fix-issues lint: - @echo "Static analysis not added yet" + @source develop.env && python -m TasksCI lint tests: - @source develop.env && python -m PassCI test + @source develop.env && python -m TasksCI test + clean: rm -rf Release/ rm -rf Debug/ + +runci: stylecheck lint tests clean diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 65f9ef8206..b9a2728355 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -58,7 +58,7 @@ source develop__venv/bin/activate pip install -r requirements.txt ``` -These adds the necessary environment variables to ensure that you have the `PassCI` package and all required dependencies. +These adds the necessary environment variables to ensure that you have the `TasksCI` package and all required dependencies. 
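For reference, the Makefile targets updated in this patch are thin wrappers around the `TasksCI` command-line module, so the same tasks can also be invoked directly once the environment above has been sourced. A usage sketch of the equivalent direct invocations, mirroring the Makefile targets in this patch:

```sh
source develop.env                                             # adds site-packages/ to PYTHONPATH
python -m TasksCI stylecheck                                   # same as `make stylecheck`
python -m TasksCI --loglevel warning stylecheck --fix-issues   # same as `make fixstyle`
python -m TasksCI lint                                         # same as `make lint`
python -m TasksCI test                                         # same as `make tests`
```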
To check the style, run @@ -80,3 +80,7 @@ make lint As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended that you use a docker image to perform these steps. + +# TODOs + +Look at https://github.com/llvm-mirror/clang-tools-extra/blob/master/clang-tidy/tool/run-clang-tidy.py diff --git a/src/QsPasses/site-packages/PassCI/Builder/__init__.py b/src/QsPasses/site-packages/TasksCI/Builder/__init__.py similarity index 100% rename from src/QsPasses/site-packages/PassCI/Builder/__init__.py rename to src/QsPasses/site-packages/TasksCI/Builder/__init__.py diff --git a/src/QsPasses/site-packages/PassCI/FormatSource/__init__.py b/src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py similarity index 73% rename from src/QsPasses/site-packages/PassCI/FormatSource/__init__.py rename to src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py index 299a53cb70..13f5a4e72b 100644 --- a/src/QsPasses/site-packages/PassCI/FormatSource/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py @@ -5,18 +5,19 @@ import os import logging import subprocess +import sys from ..Project import PROJECT_ROOT from ..ToolChain import discover_formatter -logger = logging.getLogger() +logger = logging.getLogger("FormatChecker") CLANG_FORMAT_EXE = discover_formatter() ####### # Style pipeline components -def require_token(token, filename, contents, cursor, dry_run): +def require_token(token, filename, contents, cursor, fix_issues): failed = False if not contents[cursor:].startswith(token): logger.error("{}: File must have {} at position {}".format(filename, token, cursor)) @@ -24,23 +25,23 @@ def require_token(token, filename, contents, cursor, dry_run): return cursor + len(token), failed -def require_pragma_once(filename, contents, cursor, dry_run): - return require_token("#pragma once\n", filename, contents, cursor, dry_run) +def require_pragma_once(filename, contents, cursor, fix_issues): + return require_token("#pragma once\n", filename, contents, cursor, fix_issues) -def require_todo_owner(filename, contents, cursor, dry_run): +def require_todo_owner(filename, contents, cursor, fix_issues): # TODO(tfr): implement return cursor, False -def enforce_cpp_license(filename, contents, cursor, dry_run): +def enforce_cpp_license(filename, contents, cursor, fix_issues): return require_token("""// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -""", filename, contents, cursor, dry_run) +""", filename, contents, cursor, fix_issues) -def enforce_py_license(filename, contents, cursor, dry_run): +def enforce_py_license(filename, contents, cursor, fix_issues): # Allowing empty files if contents.strip() == "": return cursor, False @@ -48,10 +49,10 @@ def enforce_py_license(filename, contents, cursor, dry_run): return require_token("""# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
-""", filename, contents, cursor, dry_run) +""", filename, contents, cursor, fix_issues) -def enforce_formatting(filename, contents, cursor, dry_run): +def enforce_formatting(filename, contents, cursor, fix_issues): p = subprocess.Popen( [CLANG_FORMAT_EXE, '-style=file'], stdout=subprocess.PIPE, @@ -64,7 +65,16 @@ def enforce_formatting(filename, contents, cursor, dry_run): formatted = output.decode('utf-8') if formatted != contents: + + # Updating the contents of the file + if fix_issues: + logger.info("Formatting {}".format(filename)) + with open(filename, "w") as filebuffer: + filebuffer.write(formatted) + return cursor, False + logger.error("{} was not correctly formatted.".format(filename)) + return cursor, True return cursor, False @@ -103,7 +113,7 @@ def enforce_formatting(filename, contents, cursor, dry_run): ] -def execute_pipeline(pipeline, filename: str, dry_run: bool): +def execute_pipeline(pipeline, filename: str, fix_issues: bool): logger.info("Executing pipeline for {}".format(filename)) cursor = 0 @@ -112,13 +122,13 @@ def execute_pipeline(pipeline, filename: str, dry_run: bool): failed = False for fnc in pipeline: - cursor, f = fnc(filename, contents, cursor, dry_run) + cursor, f = fnc(filename, contents, cursor, fix_issues) failed = failed or f return failed -def main(dry_run: bool = True): +def main(fix_issues: bool = False): failed = False for language in AUTO_FORMAT_LANGUAGES: @@ -134,15 +144,13 @@ def main(dry_run: bool = True): _, ext = filename.rsplit(".", 1) if ext in pipelines: - f = execute_pipeline(pipelines[ext], path.join(root, filename), dry_run) + f = execute_pipeline(pipelines[ext], path.join(root, filename), fix_issues) failed = failed or f if failed: logger.error("Your code did not pass formatting.") - - return failed + sys.exit(-1) if __name__ == "__main__": - if main(): - exit(-1) + main() diff --git a/src/QsPasses/site-packages/TasksCI/Linting/__init__.py b/src/QsPasses/site-packages/TasksCI/Linting/__init__.py new file mode 100644 index 0000000000..538a0bfa07 --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/Linting/__init__.py @@ -0,0 +1,110 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import logging +from ..Builder import configure_cmake, build_project +from .. 
import ToolChain +from ..Project import PROJECT_ROOT +import os +import subprocess +import sys + +logger = logging.getLogger("Linter") + + +def clang_tidy_diagnose(): + config = subprocess.check_output( + [ToolChain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() + + check_list = subprocess.check_output( + [ToolChain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() + + checks = [x.strip() for x in check_list.split("\n") if '-' in x] + + print("Working directory: {}".format(PROJECT_ROOT)) + print("") + print(config) + print("") + print("Clang tidy checks:") + for check in sorted(checks): + print(" -", check) + + +def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): + clang_tidy_binary = ToolChain.discover_tidy() + + cmd = [clang_tidy_binary] + output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) + + cmd.append('-header-filter=".*(QsPasses)\\/(src).*\\.hpp$"') + cmd.append('-p=' + build_dir) + cmd.append('-export-fixes={}'.format(output_file)) + cmd.append('--use-color') + + if fix_issues: + cmd.append("-fix") + + cmd.append(filename) + + p = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=PROJECT_ROOT) + + output, err = p.communicate() + + if p.returncode != 0: + output = output.decode() + err = err.decode() + + if "error" in err: + # TODO(TFR): write output and errors to temp log file + sys.stderr.write(output) + sys.stderr.write(err) + + logger.error("{} failed static analysis".format(filename)) + return False + + logger.info("All good!") + return True + + +def main_cpp(fix_issues: bool): + logger.info("Linting") + build_dir = os.path.join(PROJECT_ROOT, "Debug") + source_dir = os.path.join(PROJECT_ROOT, "src") + generator = None + extensions = ["cpp"] + + # Configuring CMake + configure_cmake(build_dir, generator) + + # Building + build_project(build_dir, generator) + + # Generating list of files + # TODO(TFR): Ensure that it is only those which were changed that are + # analysed + files_to_analyse = [] + + for root, dirs, files in os.walk(source_dir): + + for filename in files: + if "." 
not in filename: + continue + + _, ext = filename.rsplit(".", 1) + if ext in extensions: + files_to_analyse.append(os.path.join(root, filename)) + + success = True + for filename in files_to_analyse: + success = success and run_clang_tidy(source_dir, build_dir, filename, fix_issues=fix_issues) + return success + + +def main(fix_issues: bool): + if not main_cpp(fix_issues): + sys.exit(-1) diff --git a/src/QsPasses/site-packages/PassCI/Project.py b/src/QsPasses/site-packages/TasksCI/Project.py similarity index 100% rename from src/QsPasses/site-packages/PassCI/Project.py rename to src/QsPasses/site-packages/TasksCI/Project.py diff --git a/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py b/src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py similarity index 74% rename from src/QsPasses/site-packages/PassCI/ToolChain/__init__.py rename to src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py index 7cb807a69b..f666565860 100644 --- a/src/QsPasses/site-packages/PassCI/ToolChain/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py @@ -6,3 +6,7 @@ def discover_formatter(): return shutil.which("clang-format") + + +def discover_tidy(): + return shutil.which("clang-tidy") diff --git a/src/QsPasses/site-packages/PassCI/__main__.py b/src/QsPasses/site-packages/TasksCI/__main__.py similarity index 56% rename from src/QsPasses/site-packages/PassCI/__main__.py rename to src/QsPasses/site-packages/TasksCI/__main__.py index 9a9e74fc01..947c49bf7b 100644 --- a/src/QsPasses/site-packages/PassCI/__main__.py +++ b/src/QsPasses/site-packages/TasksCI/__main__.py @@ -4,6 +4,7 @@ from .FormatSource import main as style_check_main from .Builder import main as builder_main +from .Linting import main as lint_main, clang_tidy_diagnose import click import logging @@ -44,11 +45,39 @@ def cli(loglevel): @cli.command() -@click.option('--fix-issues/--no-fix-issues', default=False) -def style_check(fix_issues): +@click.option('--fix-issues', default=False, is_flag=True) +def stylecheck(fix_issues): logger.info("Invoking style checker") - style_check_main() + style_check_main(fix_issues) + + +@cli.command() +@click.option("--diagnose", default=False, is_flag=True) +@click.option('--fix-issues', default=False, is_flag=True) +@click.option('--force', default=False, is_flag=True) +def lint(diagnose, fix_issues, force): + if diagnose: + clang_tidy_diagnose() + return + + if fix_issues: + if not force: + print("""Fixing isssues using Clang Tidy will break your code. +Make sure that you have committed your changes BEFORE DOING THIS. +Even so, this feature is experimental and there have been reports of +clang-tidy modying system libraries - therefore, USE THIS FEATURE AT +YOUR OWN RISK. 
+ +Write 'I understand' to proceed.""") + print(":") + x = input() + if x.lower() != "i understand": + print("Wrong answer - stopping!") + exit(-1) + + logger.info("Invoking linter") + lint_main(fix_issues) @cli.command() diff --git a/src/QsPasses/src/GateCounter/GateCounter.cpp b/src/QsPasses/src/GateCounter/GateCounter.cpp index 540a85e626..194f49ab06 100644 --- a/src/QsPasses/src/GateCounter/GateCounter.cpp +++ b/src/QsPasses/src/GateCounter/GateCounter.cpp @@ -5,60 +5,65 @@ using namespace llvm; -namespace { +namespace +{ -void visitor(Function &F) +void Visitor(Function& f) { - errs() << "(gate-counter) " << F.getName() << "\n"; - errs() << "(gate-counter) number of arguments: " << F.arg_size() << "\n"; + errs() << "(gate-counter) " << f.getName() << "\n"; + errs() << "(gate-counter) number of arguments: " << f.arg_size() << "\n"; } struct GateCounterPass : PassInfoMixin { - PreservedAnalyses run(Function &F, FunctionAnalysisManager &) - { - visitor(F); + static auto run(Function& f, FunctionAnalysisManager& /*unused*/) -> PreservedAnalyses // NOLINT + { + Visitor(f); - return PreservedAnalyses::all(); - } + return PreservedAnalyses::all(); + } }; -struct LegacyGateCounterPass : public FunctionPass +class CLegacyGateCounterPass : public FunctionPass { - static char ID; - LegacyGateCounterPass() - : FunctionPass(ID) - {} + public: + static char ID; + CLegacyGateCounterPass() + : FunctionPass(ID) + { + } - bool runOnFunction(Function &F) override - { - visitor(F); - return false; - } + auto runOnFunction(Function& f) -> bool override + { + Visitor(f); + return false; + } }; -} // namespace +} // namespace -llvm::PassPluginLibraryInfo getGateCounterPluginInfo() +auto GetGateCounterPluginInfo() -> llvm::PassPluginLibraryInfo { - return {LLVM_PLUGIN_API_VERSION, "GateCounter", LLVM_VERSION_STRING, [](PassBuilder &PB) { - PB.registerPipelineParsingCallback([](StringRef Name, FunctionPassManager &FPM, - ArrayRef) { - if (Name == "gate-counter") - { - FPM.addPass(GateCounterPass()); - return true; - } - return false; - }); - }}; + return {LLVM_PLUGIN_API_VERSION, "GateCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) { + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { + if (name == "gate-counter") + { + fpm.addPass(GateCounterPass()); + return true; + } + return false; + }); + }}; } -extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +extern "C" LLVM_ATTRIBUTE_WEAK auto llvmGetPassPluginInfo() -> ::llvm::PassPluginLibraryInfo { - return getGateCounterPluginInfo(); + return GetGateCounterPluginInfo(); } -char LegacyGateCounterPass::ID = 0; -static RegisterPass LegacyGateCounterRegistration("legacy-gate-counter", - "Gate Counter Pass", true, - false); +char CLegacyGateCounterPass::ID = 0; +static RegisterPass LegacyGateCounterRegistration( + "legacy-gate-counter", + "Gate Counter Pass", + true, + false); From cfb4b9354459448037d32778f7787d35cc7b41a3 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 11:34:25 +0200 Subject: [PATCH 06/48] Refactoring CI module --- src/QsPasses/site-packages/TasksCI/__main__.py | 6 +++--- .../TasksCI/{Builder/__init__.py => builder.py} | 6 +++--- .../{FormatSource/__init__.py => formatting.py} | 4 ++-- .../TasksCI/{Linting/__init__.py => linting.py} | 12 ++++++------ .../TasksCI/{Project.py => settings.py} | 0 .../TasksCI/{ToolChain/__init__.py => toolchain.py} | 0 6 files changed, 14 insertions(+), 14 deletions(-) rename src/QsPasses/site-packages/TasksCI/{Builder/__init__.py => builder.py} (94%) rename src/QsPasses/site-packages/TasksCI/{FormatSource/__init__.py => formatting.py} (98%) rename src/QsPasses/site-packages/TasksCI/{Linting/__init__.py => linting.py} (90%) rename src/QsPasses/site-packages/TasksCI/{Project.py => settings.py} (100%) rename src/QsPasses/site-packages/TasksCI/{ToolChain/__init__.py => toolchain.py} (100%) diff --git a/src/QsPasses/site-packages/TasksCI/__main__.py b/src/QsPasses/site-packages/TasksCI/__main__.py index 947c49bf7b..69e83e40ef 100644 --- a/src/QsPasses/site-packages/TasksCI/__main__.py +++ b/src/QsPasses/site-packages/TasksCI/__main__.py @@ -2,9 +2,9 @@ # Licensed under the MIT License. -from .FormatSource import main as style_check_main -from .Builder import main as builder_main -from .Linting import main as lint_main, clang_tidy_diagnose +from .formatting import main as style_check_main +from .builder import main as builder_main +from .linting import main as lint_main, clang_tidy_diagnose import click import logging diff --git a/src/QsPasses/site-packages/TasksCI/Builder/__init__.py b/src/QsPasses/site-packages/TasksCI/builder.py similarity index 94% rename from src/QsPasses/site-packages/TasksCI/Builder/__init__.py rename to src/QsPasses/site-packages/TasksCI/builder.py index 9b77c7035d..329f1cbb32 100644 --- a/src/QsPasses/site-packages/TasksCI/Builder/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/builder.py @@ -2,8 +2,8 @@ # Licensed under the MIT License. import os -from .. import Project -from ..Project import PROJECT_ROOT +from . 
import settings +from .settings import PROJECT_ROOT import logging import subprocess import sys @@ -40,7 +40,7 @@ def build_project(build_dir: str, generator=None, concurrency=None): cmd = ["ninja"] if concurrency is None: - concurrency = Project.get_concurrency() + concurrency = settings.get_concurrency() cmd.append('-j{}'.format(concurrency)) diff --git a/src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py b/src/QsPasses/site-packages/TasksCI/formatting.py similarity index 98% rename from src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py rename to src/QsPasses/site-packages/TasksCI/formatting.py index 13f5a4e72b..9cf87c609b 100644 --- a/src/QsPasses/site-packages/TasksCI/FormatSource/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/formatting.py @@ -7,8 +7,8 @@ import subprocess import sys -from ..Project import PROJECT_ROOT -from ..ToolChain import discover_formatter +from .settings import PROJECT_ROOT +from .toolchain import discover_formatter logger = logging.getLogger("FormatChecker") CLANG_FORMAT_EXE = discover_formatter() diff --git a/src/QsPasses/site-packages/TasksCI/Linting/__init__.py b/src/QsPasses/site-packages/TasksCI/linting.py similarity index 90% rename from src/QsPasses/site-packages/TasksCI/Linting/__init__.py rename to src/QsPasses/site-packages/TasksCI/linting.py index 538a0bfa07..6f12f49d3c 100644 --- a/src/QsPasses/site-packages/TasksCI/Linting/__init__.py +++ b/src/QsPasses/site-packages/TasksCI/linting.py @@ -2,9 +2,9 @@ # Licensed under the MIT License. import logging -from ..Builder import configure_cmake, build_project -from .. import ToolChain -from ..Project import PROJECT_ROOT +from .builder import configure_cmake, build_project +from . import toolchain +from .settings import PROJECT_ROOT import os import subprocess import sys @@ -14,10 +14,10 @@ def clang_tidy_diagnose(): config = subprocess.check_output( - [ToolChain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() + [toolchain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() check_list = subprocess.check_output( - [ToolChain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() + [toolchain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() checks = [x.strip() for x in check_list.split("\n") if '-' in x] @@ -31,7 +31,7 @@ def clang_tidy_diagnose(): def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): - clang_tidy_binary = ToolChain.discover_tidy() + clang_tidy_binary = toolchain.discover_tidy() cmd = [clang_tidy_binary] output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) diff --git a/src/QsPasses/site-packages/TasksCI/Project.py b/src/QsPasses/site-packages/TasksCI/settings.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/Project.py rename to src/QsPasses/site-packages/TasksCI/settings.py diff --git a/src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py b/src/QsPasses/site-packages/TasksCI/toolchain.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/ToolChain/__init__.py rename to src/QsPasses/site-packages/TasksCI/toolchain.py From d8949bfd38141b7796129b54b479fe5380e5e9ca Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 14:16:26 +0200 Subject: [PATCH 07/48] Refactoring --- src/QsPasses/CMakeLists.txt | 6 +-- src/QsPasses/README.md | 14 ++++++- .../examples/ClassicalIrCommandline/Makefile | 15 ++++++++ .../examples/ClassicalIrCommandline/README.md | 36 ++++++++++++++++++ .../classical-program.bc | Bin 0 -> 2416 bytes .../classical-program.c | 17 +++++++++ src/QsPasses/src/Llvm.hpp | 6 ++- .../OpsCounter.cpp} | 30 ++++++++------- src/QsPasses/src/OpsCounter/OpsCounter.hpp | 7 ++++ 9 files changed, 112 insertions(+), 19 deletions(-) create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/Makefile create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/README.md create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.c rename src/QsPasses/src/{GateCounter/GateCounter.cpp => OpsCounter/OpsCounter.cpp} (52%) create mode 100644 src/QsPasses/src/OpsCounter/OpsCounter.hpp diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 4135da0dd6..6cfd104282 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -11,8 +11,8 @@ message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wconversion -Wpedantic") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") # Needed for clang-tidy set(CMAKE_EXPORT_COMPILE_COMMANDS ON) @@ -27,6 +27,6 @@ if(NOT LLVM_ENABLE_RTTI) endif() # The main libary -add_library(QSharpPasses SHARED src/GateCounter/GateCounter.cpp) +add_library(QSharpPasses SHARED src/OpsCounter/OpsCounter.cpp) target_link_libraries(QSharpPasses "$<$:-undefined dynamic_lookup>") diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index b9a2728355..0472337a67 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -42,7 +42,13 @@ make [target] ## Running a pass -Yet to be written +You can run a pass using `opt` as follows: + +```sh +opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc +``` + +For a gentle introduction, see examples. ## CI @@ -78,6 +84,12 @@ Finally, to analyse the code, run make lint ``` +You can run all processes by running: + +```sh +make runci +``` + As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended that you use a docker image to perform these steps. 
diff --git a/src/QsPasses/examples/ClassicalIrCommandline/Makefile b/src/QsPasses/examples/ClassicalIrCommandline/Makefile new file mode 100644 index 0000000000..b69052cb63 --- /dev/null +++ b/src/QsPasses/examples/ClassicalIrCommandline/Makefile @@ -0,0 +1,15 @@ +emit-llvm: + clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll + +emit-llvm-bc: + clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc + + +debug-ng-pass-mac: emit-llvm-bc + opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib --passes="operation-counter" -disable-output classical-program.bc + + + +clean: + rm -f classical-program.ll + rm -f classical-program.bc \ No newline at end of file diff --git a/src/QsPasses/examples/ClassicalIrCommandline/README.md b/src/QsPasses/examples/ClassicalIrCommandline/README.md new file mode 100644 index 0000000000..b293fc6b5c --- /dev/null +++ b/src/QsPasses/examples/ClassicalIrCommandline/README.md @@ -0,0 +1,36 @@ +# Emitting classical IRs + +This example demonstrates how to emit a classical IR and run a custom +pass on it. The purpose of this example is to teach the user how to apply +a pass to a IR using commandline tools only. + +IRs can be represented either by a human readible language or through bytecode. For +C programs former is generated by + +```sh + clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll +``` + +where as the latter is generated writing: + +```sh + clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc +``` + +This generates a nice and short IR which makes not too overwhelming to understand what is going on. + +## Legacy passes + +This part assumes that you have build the QsPasses library. + +```sh +opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-counter -analyze classical-program.ll +``` + +## Next-gen passes + +This part assumes that you have build the QsPasses library. 
+ +```sh +opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc +``` diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc new file mode 100644 index 0000000000000000000000000000000000000000..81f5c5224dc790063ec2c7aa487e9a61e44edde2 GIT binary patch literal 2416 zcmZ`*Z%kX)6~D%YYruYX0JUa$etiz<=CuudIGJl=vjH#7oDOM5%LgWE$*F!hChK2p7`)y( zn9y4~Y-+=gS}cP}r?hP9I@3lL^aIx$E*91Ab@1ZZB>yLcWiVlQ#{5jG=W6!CHGa7W zdf(s?(!yvtrf%jhzNTtFb}Iqhc49Hr(KeJ^XiX%B^vC@ww@ZcIJR(~(Xt-Iv42Pf% z8xVRM#-q;fJo*51J;IKCyRO4tr&awHJac_}D^Xy#@mmgp&i|m@rZ+->X^sAg-L1mFr(Mf|>r-PvMSW89jX-CX2Q*o;Fd za#g}IC`l2&8ArWJznMrG5={|b8oRYCil;uyF_juqm9_VBY}Lc<#I*M^#x=!LAm1~w z9pLmCu6W`j>qA0(*qet0T1=Sl$nWkQ=>zz5px_yc!50O)j;{LuCM`5UmnOR-`Jt>(l^jYE?CXz`~_9 zokxaJ3(epD;fhL}Z!;%+W(dZ65R+fS`bo9SE^o1u9g4oBXiF3p24zg+nV!?^A>DmF z^H63^Y6dxg>&J`iq|jK~7C%~2_7%mx1!Z3;ybzG^6*UILN)nDrN}2_pbe4`V2PtMt z(9HuQ#tnD{CS%^iO$u+=KaAVUV(%TL9m`A+7iY znW9c|Lwa!VQ6s1g$z)Vb<_v&65=qCNdr)N_Y%>4IF$aQjtO&STq)>r=-NUY$*yR{J z`KEg*v9~DprfdBN1=|hLmQ>nr6rP7mzn-^$sAVWGo z{s~}E_jebwlVY~}bz3}h0ABEBc>7R@xVt8HJ;tqX!mBZUq2MMB!?vt=`exi-avgu{ zh?|7MIGi3HdYIDD2yj26#b@7S+*mUHT)|O;O~$IH>HKSPTd82rihU4}uk(%_(Vm29 zVd0N0;3)}#Jx~nI*q4r7*w!-#IcC?zd?|D--5ua|ww|nP88^THXQw?I@4FK}nkd*7 z#NJZD@xfAWu>d|gO3piT@M_$64gxqrI{&i59JqD&Hv!qYT|u|Y>;CCtX7tRqz`*p& z&8EL@GCMBjksxlZnP~Z~Dr%N{&VKsmf(??DEj(WlPnX2gN%1szT^7HOB0NG*1EzeO zlNXbk&Ssy*$#=yTu3h`&Oh#|M=;*Nw{wig9+or#SUK&)O(74y*8}&~`CtT-5erO^% zae4f*XXX_?IN|e8&Cc*WJ>5Os0_sxdgn6XzQup`FBg09}8COX#U^j zj1~$D5c<+Rs7_j+WI-i`K9rO66JySFF#D{IxQR}G_9!a`9abnstKI;#| lg3v7hO8ow*anI!wl5h4EpJ%#zd=w6k!s$_ZoGSaT_HTfd7o-3H literal 0 HcmV?d00001 diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.c b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.c new file mode 100644 index 0000000000..be19ee2fcc --- /dev/null +++ b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.c @@ -0,0 +1,17 @@ +int foo(int x) +{ + return x; +} + +void bar(int x, int y) +{ + foo(x + y); +} + +int main() +{ + foo(2); + bar(3, 2); + + return 0; +} \ No newline at end of file diff --git a/src/QsPasses/src/Llvm.hpp b/src/QsPasses/src/Llvm.hpp index 408ad61cdf..cbff875717 100644 --- a/src/QsPasses/src/Llvm.hpp +++ b/src/QsPasses/src/Llvm.hpp @@ -10,6 +10,8 @@ #pragma GCC diagnostic ignored "-Wsign-compare" #pragma GCC diagnostic ignored "-Wunknown-warning-option" #pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wall" +#pragma GCC diagnostic ignored "-Weverything" #endif #if defined(__clang__) @@ -21,6 +23,8 @@ #pragma clang diagnostic ignored "-Wreturn-std-move" #pragma clang diagnostic ignored "-Wunknown-warning-option" #pragma clang diagnostic ignored "-Wunused-parameter" +#pragma clang diagnostic ignored "-Wall" +#pragma clang diagnostic ignored "-Weverything" #endif #include "llvm/IR/LegacyPassManager.h" @@ -34,4 +38,4 @@ #if defined(__GNUC__) #pragma GCC diagnostic pop -#endif \ No newline at end of file +#endif diff --git a/src/QsPasses/src/GateCounter/GateCounter.cpp b/src/QsPasses/src/OpsCounter/OpsCounter.cpp similarity index 52% rename from src/QsPasses/src/GateCounter/GateCounter.cpp rename to src/QsPasses/src/OpsCounter/OpsCounter.cpp index 194f49ab06..b9bbc1469a 100644 --- a/src/QsPasses/src/GateCounter/GateCounter.cpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.cpp @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. 
All rights reserved. // Licensed under the MIT License. +#include "OpsCounter/OpsCounter.hpp" + #include "Llvm.hpp" using namespace llvm; @@ -10,11 +12,11 @@ namespace void Visitor(Function& f) { - errs() << "(gate-counter) " << f.getName() << "\n"; - errs() << "(gate-counter) number of arguments: " << f.arg_size() << "\n"; + errs() << "(operation-counter) " << f.getName() << "\n"; + errs() << "(operation-counter) number of arguments: " << f.arg_size() << "\n"; } -struct GateCounterPass : PassInfoMixin +struct OpsCounterPass : PassInfoMixin { static auto run(Function& f, FunctionAnalysisManager& /*unused*/) -> PreservedAnalyses // NOLINT { @@ -24,11 +26,11 @@ struct GateCounterPass : PassInfoMixin } }; -class CLegacyGateCounterPass : public FunctionPass +class CLegacyOpsCounterPass : public FunctionPass { public: static char ID; - CLegacyGateCounterPass() + CLegacyOpsCounterPass() : FunctionPass(ID) { } @@ -41,14 +43,14 @@ class CLegacyGateCounterPass : public FunctionPass }; } // namespace -auto GetGateCounterPluginInfo() -> llvm::PassPluginLibraryInfo +llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { - return {LLVM_PLUGIN_API_VERSION, "GateCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) { + return {LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) { pb.registerPipelineParsingCallback( [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { - if (name == "gate-counter") + if (name == "operation-counter") { - fpm.addPass(GateCounterPass()); + fpm.addPass(OpsCounterPass()); return true; } return false; @@ -56,14 +58,14 @@ auto GetGateCounterPluginInfo() -> llvm::PassPluginLibraryInfo }}; } -extern "C" LLVM_ATTRIBUTE_WEAK auto llvmGetPassPluginInfo() -> ::llvm::PassPluginLibraryInfo +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return GetGateCounterPluginInfo(); + return GetOpsCounterPluginInfo(); } -char CLegacyGateCounterPass::ID = 0; -static RegisterPass LegacyGateCounterRegistration( - "legacy-gate-counter", +char CLegacyOpsCounterPass::ID = 0; +static RegisterPass LegacyOpsCounterRegistration( + "legacy-operation-counter", "Gate Counter Pass", true, false); diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/src/OpsCounter/OpsCounter.hpp new file mode 100644 index 0000000000..a26cc5e757 --- /dev/null +++ b/src/QsPasses/src/OpsCounter/OpsCounter.hpp @@ -0,0 +1,7 @@ +#pragma once +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include "Llvm.hpp" + +auto GetOpsCounterPluginInfo() -> llvm::PassPluginLibraryInfo; From cc1a0e6270488712d500c84f8b49304616f8faef Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 14:16:47 +0200 Subject: [PATCH 08/48] Removing binary IR --- .../ClassicalIrCommandline/classical-program.bc | Bin 2416 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc deleted file mode 100644 index 81f5c5224dc790063ec2c7aa487e9a61e44edde2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2416 zcmZ`*Z%kX)6~D%YYruYX0JUa$etiz<=CuudIGJl=vjH#7oDOM5%LgWE$*F!hChK2p7`)y( zn9y4~Y-+=gS}cP}r?hP9I@3lL^aIx$E*91Ab@1ZZB>yLcWiVlQ#{5jG=W6!CHGa7W zdf(s?(!yvtrf%jhzNTtFb}Iqhc49Hr(KeJ^XiX%B^vC@ww@ZcIJR(~(Xt-Iv42Pf% z8xVRM#-q;fJo*51J;IKCyRO4tr&awHJac_}D^Xy#@mmgp&i|m@rZ+->X^sAg-L1mFr(Mf|>r-PvMSW89jX-CX2Q*o;Fd za#g}IC`l2&8ArWJznMrG5={|b8oRYCil;uyF_juqm9_VBY}Lc<#I*M^#x=!LAm1~w z9pLmCu6W`j>qA0(*qet0T1=Sl$nWkQ=>zz5px_yc!50O)j;{LuCM`5UmnOR-`Jt>(l^jYE?CXz`~_9 zokxaJ3(epD;fhL}Z!;%+W(dZ65R+fS`bo9SE^o1u9g4oBXiF3p24zg+nV!?^A>DmF z^H63^Y6dxg>&J`iq|jK~7C%~2_7%mx1!Z3;ybzG^6*UILN)nDrN}2_pbe4`V2PtMt z(9HuQ#tnD{CS%^iO$u+=KaAVUV(%TL9m`A+7iY znW9c|Lwa!VQ6s1g$z)Vb<_v&65=qCNdr)N_Y%>4IF$aQjtO&STq)>r=-NUY$*yR{J z`KEg*v9~DprfdBN1=|hLmQ>nr6rP7mzn-^$sAVWGo z{s~}E_jebwlVY~}bz3}h0ABEBc>7R@xVt8HJ;tqX!mBZUq2MMB!?vt=`exi-avgu{ zh?|7MIGi3HdYIDD2yj26#b@7S+*mUHT)|O;O~$IH>HKSPTd82rihU4}uk(%_(Vm29 zVd0N0;3)}#Jx~nI*q4r7*w!-#IcC?zd?|D--5ua|ww|nP88^THXQw?I@4FK}nkd*7 z#NJZD@xfAWu>d|gO3piT@M_$64gxqrI{&i59JqD&Hv!qYT|u|Y>;CCtX7tRqz`*p& z&8EL@GCMBjksxlZnP~Z~Dr%N{&VKsmf(??DEj(WlPnX2gN%1szT^7HOB0NG*1EzeO zlNXbk&Ssy*$#=yTu3h`&Oh#|M=;*Nw{wig9+or#SUK&)O(74y*8}&~`CtT-5erO^% zae4f*XXX_?IN|e8&Cc*WJ>5Os0_sxdgn6XzQup`FBg09}8COX#U^j zj1~$D5c<+Rs7_j+WI-i`K9rO66JySFF#D{IxQR}G_9!a`9abnstKI;#| lg3v7hO8ow*anI!wl5h4EpJ%#zd=w6k!s$_ZoGSaT_HTfd7o-3H From 1a98e31d49231a7c1154d9b91614dd93e8586953 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 21 Jul 2021 14:17:50 +0200 Subject: [PATCH 09/48] Updating gitignore --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 77a2c5708e..e0f3d8dcec 100644 --- a/.gitignore +++ b/.gitignore @@ -306,6 +306,9 @@ paket-files/ __pycache__/ *.pyc +# Python Virtual environments +*__venv/ + # Cake - Uncomment if you are using it # tools/** # !tools/packages.config From 74a492457c98d302243735c2ba84d29786a0dae6 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 14:40:38 +0200 Subject: [PATCH 10/48] Creating root tool for performing CI tasks --- src/QsPasses/Makefile | 14 --- src/QsPasses/develop.env | 3 - src/QsPasses/manage | 10 ++ .../site-packages/TasksCI/__main__.py | 92 +-------------- src/QsPasses/site-packages/TasksCI/cli.py | 106 ++++++++++++++++++ 5 files changed, 117 insertions(+), 108 deletions(-) delete mode 100644 src/QsPasses/develop.env create mode 100755 src/QsPasses/manage create mode 100644 src/QsPasses/site-packages/TasksCI/cli.py diff --git a/src/QsPasses/Makefile b/src/QsPasses/Makefile index 0a281789aa..e039211bb9 100644 --- a/src/QsPasses/Makefile +++ b/src/QsPasses/Makefile @@ -1,18 +1,4 @@ -stylecheck: - @source develop.env && python -m TasksCI stylecheck - -fixstyle: - @source develop.env && python -m TasksCI --loglevel warning stylecheck --fix-issues - -lint: - @source develop.env && python -m TasksCI lint - -tests: - @source develop.env && python -m TasksCI test - - clean: rm -rf Release/ rm -rf Debug/ -runci: stylecheck lint tests clean diff --git a/src/QsPasses/develop.env b/src/QsPasses/develop.env deleted file mode 100644 index 002d66b4ee..0000000000 --- a/src/QsPasses/develop.env +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -export PYTHONPATH=$PYTHONPATH:$PWD/site-packages \ No newline at end of file diff --git a/src/QsPasses/manage b/src/QsPasses/manage new file mode 100755 index 0000000000..ada5de795d --- /dev/null +++ b/src/QsPasses/manage @@ -0,0 +1,10 @@ +#!/usr/bin/env python +import os +import sys + +ROOT = os.path.dirname(__file__) +sys.path.insert(0,os.path.join(ROOT, "site-packages")) + +from TasksCI.cli import cli + +cli() \ No newline at end of file diff --git a/src/QsPasses/site-packages/TasksCI/__main__.py b/src/QsPasses/site-packages/TasksCI/__main__.py index 69e83e40ef..425abf50fa 100644 --- a/src/QsPasses/site-packages/TasksCI/__main__.py +++ b/src/QsPasses/site-packages/TasksCI/__main__.py @@ -1,97 +1,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. - -from .formatting import main as style_check_main -from .builder import main as builder_main -from .linting import main as lint_main, clang_tidy_diagnose - -import click -import logging -import sys - -logger = logging.getLogger() - -# Logging configuration -ch = logging.StreamHandler() -ch.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -ch.setFormatter(formatter) -logger.addHandler(ch) - -# By default we only log errors -logger.setLevel(logging.ERROR) - - -@click.group() -@click.option('--loglevel', default="error") -def cli(loglevel): - levels = { - "critical": 50, - "error": 40, - "warning": 30, - "info": 20, - "debug": 10, - "notset": 0 - } - - loglevel = loglevel.lower() - if loglevel not in levels: - logger.critical("Invalid log level") - sys.exit(-1) - - logger.setLevel(levels[loglevel]) - logger.info("Loglevel set to {}".format(loglevel)) - - -@cli.command() -@click.option('--fix-issues', default=False, is_flag=True) -def stylecheck(fix_issues): - logger.info("Invoking style checker") - - style_check_main(fix_issues) - - -@cli.command() -@click.option("--diagnose", default=False, is_flag=True) -@click.option('--fix-issues', default=False, is_flag=True) -@click.option('--force', default=False, is_flag=True) -def lint(diagnose, fix_issues, force): - if diagnose: - clang_tidy_diagnose() - return - - if fix_issues: - if not force: - print("""Fixing isssues using Clang Tidy will break your code. 
-Make sure that you have committed your changes BEFORE DOING THIS. -Even so, this feature is experimental and there have been reports of -clang-tidy modying system libraries - therefore, USE THIS FEATURE AT -YOUR OWN RISK. - -Write 'I understand' to proceed.""") - print(":") - x = input() - if x.lower() != "i understand": - print("Wrong answer - stopping!") - exit(-1) - - logger.info("Invoking linter") - lint_main(fix_issues) - - -@cli.command() -@click.option('--debug/--no-debug', default=True) -@click.option('--generator', default=None) -def test(debug, generator): - logger.info("Building and testing") - - build_dir = "Debug" - if not debug: - build_dir = "Release" - - builder_main(build_dir, generator, True) - +from .cli import cli if __name__ == '__main__': cli() diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py new file mode 100644 index 0000000000..1a9a1f3f25 --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +from .formatting import main as style_check_main +from .builder import main as builder_main +from .linting import main as lint_main, clang_tidy_diagnose + +import click +import logging +import sys + +logger = logging.getLogger() + +# Logging configuration +ch = logging.StreamHandler() +ch.setLevel(logging.DEBUG) +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +logger.addHandler(ch) + +# By default we only log errors +logger.setLevel(logging.ERROR) + + +@click.group() +@click.option('--loglevel', default="error") +def cli(loglevel): + levels = { + "critical": 50, + "error": 40, + "warning": 30, + "info": 20, + "debug": 10, + "notset": 0 + } + + loglevel = loglevel.lower() + if loglevel not in levels: + logger.critical("Invalid log level") + sys.exit(-1) + + logger.setLevel(levels[loglevel]) + logger.info("Loglevel set to {}".format(loglevel)) + + +@cli.command() +@click.option('--fix-issues', default=False, is_flag=True) +def stylecheck(fix_issues): + logger.info("Invoking style checker") + + style_check_main(fix_issues) + + +@cli.command() +@click.option("--diagnose", default=False, is_flag=True) +@click.option('--fix-issues', default=False, is_flag=True) +@click.option('--force', default=False, is_flag=True) +def lint(diagnose, fix_issues, force): + if diagnose: + clang_tidy_diagnose() + return + + if fix_issues: + if not force: + print("""Fixing isssues using Clang Tidy will break your code. +Make sure that you have committed your changes BEFORE DOING THIS. +Even so, this feature is experimental and there have been reports of +clang-tidy modying system libraries - therefore, USE THIS FEATURE AT +YOUR OWN RISK. 
+ +Write 'I understand' to proceed.""") + print(":") + x = input() + if x.lower() != "i understand": + print("Wrong answer - stopping!") + exit(-1) + + logger.info("Invoking linter") + lint_main(fix_issues) + + +@cli.command() +@click.option('--debug/--no-debug', default=True) +@click.option('--generator', default=None) +def test(debug, generator): + logger.info("Building and testing") + + build_dir = "Debug" + if not debug: + build_dir = "Release" + + builder_main(build_dir, generator, True) + + +@cli.command() +def runci(): + build_dir = "Debug" + + style_check_main(False) + lint_main(False) + builder_main(build_dir, None, True) + + +if __name__ == '__main__': + cli() From 9841c95acd3d273dc2f77ee7765d582ef0ac138c Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 21 Jul 2021 14:46:52 +0200 Subject: [PATCH 11/48] Updating documentation --- src/QsPasses/README.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 0472337a67..1a7a15675c 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -69,30 +69,26 @@ These adds the necessary environment variables to ensure that you have the `Task To check the style, run ```sh -make stylecheck +./manage stylecheck ``` To test that the code compiles and tests passes run ```sh -make tests +./manage test ``` Finally, to analyse the code, run ```sh -make lint +./manage lint ``` You can run all processes by running: ```sh -make runci +./manage runci ``` As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended -that you use a docker image to perform these steps. - -# TODOs - -Look at https://github.com/llvm-mirror/clang-tools-extra/blob/master/clang-tidy/tool/run-clang-tidy.py +that you use a docker image to perform these steps. TODO(TFR): The docker image is not added yet and this will be documented in the future. From 1a5e95f8ecc78960a1886f785f0a2060ecc2d332 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 21 Jul 2021 17:05:21 +0200 Subject: [PATCH 12/48] Refactoring pass --- .../classical-program.bc | Bin 0 -> 2416 bytes src/QsPasses/src/OpsCounter/OpsCounter.cpp | 68 ++++-------------- src/QsPasses/src/OpsCounter/OpsCounter.hpp | 30 ++++++++ 3 files changed, 43 insertions(+), 55 deletions(-) create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc new file mode 100644 index 0000000000000000000000000000000000000000..81f5c5224dc790063ec2c7aa487e9a61e44edde2 GIT binary patch literal 2416 zcmZ`*Z%kX)6~D%YYruYX0JUa$etiz<=CuudIGJl=vjH#7oDOM5%LgWE$*F!hChK2p7`)y( zn9y4~Y-+=gS}cP}r?hP9I@3lL^aIx$E*91Ab@1ZZB>yLcWiVlQ#{5jG=W6!CHGa7W zdf(s?(!yvtrf%jhzNTtFb}Iqhc49Hr(KeJ^XiX%B^vC@ww@ZcIJR(~(Xt-Iv42Pf% z8xVRM#-q;fJo*51J;IKCyRO4tr&awHJac_}D^Xy#@mmgp&i|m@rZ+->X^sAg-L1mFr(Mf|>r-PvMSW89jX-CX2Q*o;Fd za#g}IC`l2&8ArWJznMrG5={|b8oRYCil;uyF_juqm9_VBY}Lc<#I*M^#x=!LAm1~w z9pLmCu6W`j>qA0(*qet0T1=Sl$nWkQ=>zz5px_yc!50O)j;{LuCM`5UmnOR-`Jt>(l^jYE?CXz`~_9 zokxaJ3(epD;fhL}Z!;%+W(dZ65R+fS`bo9SE^o1u9g4oBXiF3p24zg+nV!?^A>DmF z^H63^Y6dxg>&J`iq|jK~7C%~2_7%mx1!Z3;ybzG^6*UILN)nDrN}2_pbe4`V2PtMt z(9HuQ#tnD{CS%^iO$u+=KaAVUV(%TL9m`A+7iY znW9c|Lwa!VQ6s1g$z)Vb<_v&65=qCNdr)N_Y%>4IF$aQjtO&STq)>r=-NUY$*yR{J z`KEg*v9~DprfdBN1=|hLmQ>nr6rP7mzn-^$sAVWGo z{s~}E_jebwlVY~}bz3}h0ABEBc>7R@xVt8HJ;tqX!mBZUq2MMB!?vt=`exi-avgu{ zh?|7MIGi3HdYIDD2yj26#b@7S+*mUHT)|O;O~$IH>HKSPTd82rihU4}uk(%_(Vm29 zVd0N0;3)}#Jx~nI*q4r7*w!-#IcC?zd?|D--5ua|ww|nP88^THXQw?I@4FK}nkd*7 z#NJZD@xfAWu>d|gO3piT@M_$64gxqrI{&i59JqD&Hv!qYT|u|Y>;CCtX7tRqz`*p& z&8EL@GCMBjksxlZnP~Z~Dr%N{&VKsmf(??DEj(WlPnX2gN%1szT^7HOB0NG*1EzeO zlNXbk&Ssy*$#=yTu3h`&Oh#|M=;*Nw{wig9+or#SUK&)O(74y*8}&~`CtT-5erO^% zae4f*XXX_?IN|e8&Cc*WJ>5Os0_sxdgn6XzQup`FBg09}8COX#U^j zj1~$D5c<+Rs7_j+WI-i`K9rO66JySFF#D{IxQR}G_9!a`9abnstKI;#| lg3v7hO8ow*anI!wl5h4EpJ%#zd=w6k!s$_ZoGSaT_HTfd7o-3H literal 0 HcmV?d00001 diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.cpp b/src/QsPasses/src/OpsCounter/OpsCounter.cpp index b9bbc1469a..9400bbdb82 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.cpp @@ -7,65 +7,23 @@ using namespace llvm; -namespace -{ - -void Visitor(Function& f) -{ - errs() << "(operation-counter) " << f.getName() << "\n"; - errs() << "(operation-counter) number of arguments: " << f.arg_size() << "\n"; -} - -struct OpsCounterPass : PassInfoMixin -{ - static auto run(Function& f, FunctionAnalysisManager& /*unused*/) -> PreservedAnalyses // NOLINT - { - Visitor(f); - - return PreservedAnalyses::all(); - } -}; - -class CLegacyOpsCounterPass : public FunctionPass -{ - public: - static char ID; - CLegacyOpsCounterPass() - : FunctionPass(ID) - { - } - - auto runOnFunction(Function& f) -> bool override - { - Visitor(f); - return false; - } -}; -} // namespace - llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { - return {LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) { - pb.registerPipelineParsingCallback( - [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { - if (name == "operation-counter") - { - fpm.addPass(OpsCounterPass()); - return true; - } - return false; - }); - }}; + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "operation-counter") + { + 
fpm.addPass(COpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); + }}; } extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return GetOpsCounterPluginInfo(); + return GetOpsCounterPluginInfo(); } - -char CLegacyOpsCounterPass::ID = 0; -static RegisterPass LegacyOpsCounterRegistration( - "legacy-operation-counter", - "Gate Counter Pass", - true, - false); diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/src/OpsCounter/OpsCounter.hpp index a26cc5e757..4095c6ad34 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.hpp @@ -4,4 +4,34 @@ #include "Llvm.hpp" +class COpsCounterPrinter : public llvm::PassInfoMixin +{ +public: + explicit COpsCounterPrinter(llvm::raw_ostream &out_stream) + : out_stream_(out_stream) + {} + + // llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + auto run(llvm::Function &f, llvm::FunctionAnalysisManager & /*unused*/) + -> llvm::PreservedAnalyses // NOLINT + { + out_stream_ << "(operation-counter) " << f.getName() << "\n"; + out_stream_ << "(operation-counter) number of arguments: " << f.arg_size() << "\n"; + + return llvm::PreservedAnalyses::all(); + } + /* + TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as + unused after compilation + */ + + static bool isRequired() + { + return true; + } + +private: + llvm::raw_ostream &out_stream_; +}; + auto GetOpsCounterPluginInfo() -> llvm::PassPluginLibraryInfo; From f8c7a9731ddb4dfb69f683cd622c950f5910f6b4 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 21 Jul 2021 17:37:42 +0200 Subject: [PATCH 13/48] Preparing analysis module --- .../classical-program.bc | Bin 2416 -> 2400 bytes .../examples/ClassicalIrCommandline/out.txt | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/QsPasses/examples/ClassicalIrCommandline/out.txt diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc index 81f5c5224dc790063ec2c7aa487e9a61e44edde2..bb5a7848a56b2412806ec92ed5bbcb91408915ef 100644 GIT binary patch delta 360 zcmew$^gxLB-T`rL1|SdtVwZ`$(t^3ax5RS?Hux|C1r&K07?>tnE|-`I7!8-fYhJg;DXvLb*E&Wo{+NJXk2#P^`gTuEAdE(O$^VQB}}h$-!Q% zF*Rdq6FN)@3~eEY z9T*rCfVOe~u>%9ofeEJ+fR14S@)$v)4U7|RC*^b9u}u!}KrerJ~0VBf&>k9o2`#}!tv#AFRl4**)Hb4CCF delta 392 zcmaDL^g)RC-T`rL1|Sdt;*g2F(t@$SxB7DjHux|C1r&K07}zFSE)QP}%O|i`YP1(FU@tq-ZZ@I4dCLVJ-yyO~0J$%gh4AUmVEvV*;H zLA#j&dqqZb83%in1N#A)LyfY}7RoSe{PBg+<<&viXNods7RuZ@AoE}$?=3}{7mTtD z#T@KqGun$7I;wyq(2xrDVz41)73~EJ;3hQiwg@nYF)&PaXFj3BkigIua@c`^K>_Gm z4j^V=;5jhilmgHbEI>BMLkta!6K*I3m^eICFyg89Yg~`DA;Vngwb3QQ2ol2B> zk?3~i=3$E#X6p@y^=~*^opHA6aJJguY<0oej-gV4fkAchQ8rb*Yz8kUMuzg_jKre& z;>@bl08eKHpVYF{oaCIuymW=K)S}|d{5%CiLp?)11BT5C>|%_ZznLXI*f%gWu}rq- NxWWpSnJmHS0RYRyfqeh~ diff --git a/src/QsPasses/examples/ClassicalIrCommandline/out.txt b/src/QsPasses/examples/ClassicalIrCommandline/out.txt new file mode 100644 index 0000000000..e69de29bb2 From 87c08b9fa1fa74709c32c9fa182ca4ece5ae23b4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 10:37:01 +0200 Subject: [PATCH 14/48] Adding a style proposal --- src/QsPasses/CMakeLists.txt | 20 ++++- src/QsPasses/CONTRIBUTING.md | 77 +++++++++++++++++++ src/QsPasses/README.md | 64 ++++++++++++++- .../examples/ClassicalIrCommandline/Makefile | 2 +- .../examples/ClassicalIrCommandline/out.txt | 0 .../site-packages/TasksCI/__main__.py | 2 +- src/QsPasses/site-packages/TasksCI/builder.py | 2 +- src/QsPasses/site-packages/TasksCI/cli.py | 2 +- .../site-packages/TasksCI/formatting.py | 6 +- src/QsPasses/site-packages/TasksCI/linting.py | 2 +- .../site-packages/TasksCI/settings.py | 2 +- .../site-packages/TasksCI/toolchain.py | 2 +- src/QsPasses/src/Llvm.hpp | 2 +- src/QsPasses/src/OpsCounter/OpsCounter.cpp | 17 +++- src/QsPasses/src/OpsCounter/OpsCounter.hpp | 60 +++++++++++++-- 15 files changed, 239 insertions(+), 21 deletions(-) create mode 100644 src/QsPasses/CONTRIBUTING.md delete mode 100644 src/QsPasses/examples/ClassicalIrCommandline/out.txt diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 6cfd104282..976649b6ab 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -8,7 +8,7 @@ message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") # Setting the standard for -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") @@ -18,14 +18,30 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") set(CMAKE_EXPORT_COMPILE_COMMANDS ON) include_directories(${LLVM_INCLUDE_DIRS}) +link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/src) -# LLVM uses RTTI by default - added here for consistency + + +# Compiler flags +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") + +# LLVM is normally built without RTTI. Be consistent with that. if(NOT LLVM_ENABLE_RTTI) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") endif() +# -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings +# are triggered if llvm-tutor is built without this flag (though otherwise it +# builds fine). For consistency, add it here too. +include(CheckCXXCompilerFlag) +check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) +if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") +endif() + + # The main libary add_library(QSharpPasses SHARED src/OpsCounter/OpsCounter.cpp) target_link_libraries(QSharpPasses diff --git a/src/QsPasses/CONTRIBUTING.md b/src/QsPasses/CONTRIBUTING.md new file mode 100644 index 0000000000..5cdd18327f --- /dev/null +++ b/src/QsPasses/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing (Proposal - WiP) + +This document is work in progress and nothing is set in stone. + +## Why do we need a style guide? + +Consistency and readibility such that it is easy to read and understand code that was not written by yourself. 
For example, if one developer uses `CamelCase` for namespaces and `snake_case` for classes while another uses `snake_case` for namespaces and `CamelCase` you may end up with code sections that looks like this + +```cpp +int32_t main() +{ + name_space1::Class1 hello; + NameSpace2::class_name world; +} +``` + +which is hard to read. + +## What does the style guide apply to? + +The style guide applies to any new code written as well as code that is being refactored added to the `QsPasses` library. We do not rewrite existing code for the sake just changing the style. + +## Style discrepency + +In case of a discrepency between this guideline and `clang-tidy` or `clang-format`, +clang tools rule. In case of discrency between this guide and any guides subsequently referenced guides, this guide rule. However, feel free to suggest changes. Changes will be incorporated on the basis +that updated styles are apply to new code and not existing code. + +## Naming + +Naming is taken from the [Microsoft AirSim](https://github.com/microsoft/AirSim/blob/master/docs/coding_guidelines.md) project. + +| **Code Element** | **Style** | **Comment** | +| --------------------- | -------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | +| Namespace | snake_case | Differentiates `namespace::ClassName` and `ClassName::SubClass` names | +| Class name | CamelCase | To differentiate from STL types which ISO recommends (do not use "C" or "T" prefixes) | +| Function name | camelCase | Lower case start is almost universal except for .Net world | +| Parameters/Locals | snake_case | Vast majority of standards recommends this because \_ is more readable to C++ crowd (although not much to Java/.Net crowd) | +| Member variables | snake_case_with\_ | The prefix \_ is heavily discouraged as ISO has rules around reserving \_identifiers, so we recommend suffix instead | +| Enums and its members | CamelCase | Most except very old standards agree with this one | +| Globals | g_snake_case | Avoid using globals whenever possible, but if you have to use `g_`. | +| Constants | UPPER_CASE | Very contentious and we just have to pick one here, unless if is a private constant in class or method, then use naming for Members or Locals | +| File names | Match case of class name in file | Lot of pro and cons either way but this removes inconsistency in auto generated code (important for ROS) | + +## Modernise when possible + +In general, modernise the code where possible. For instance, prefer `using` of `typedef`. + +## Header guards + +Prefer `#pragma once` over `#ifdef` protection. + +## Code TODOs must contain owner name or Github issue + +```sh +% ./manage runci +(...) +QsPasses/src/OpsCounter/OpsCounter.cpp:39:21: error: missing username/bug in TODO [google-readability-todo,-warnings-as-errors] + // TODO: Fails to load if this is present + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // TODO(tfr): Fails to load if this is present +``` + +## Always add copyrights + +Always add copyrights at the top of the file. + +```text +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +``` + +For header files, prefer to put `#prama once` before the copyright. + +## Tabs vs. spaces + +Seriously, this should not even be a discussion: It does not matter. If you prefer one over the other feel free to write in whatever style you prefer as long as you use `clang-format` before making a PR. 
Again, the key here is consistency and readibility. diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 1a7a15675c..e242736dbb 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -2,11 +2,41 @@ This library defines LLVM passes used for optimising and transforming the IR. -## Getting started - The Q# pass library is a dynamic library that can be compiled and ran separately from the rest of the project code. +## What does LLVM passes do? + +Example 1: Optimisation + +``` +define double @test(double %x) { +entry: + %addtmp = fadd double 3.000000e+00, %x + %addtmp1 = fadd double %x, 3.000000e+00 + %multmp = fmul double %addtmp, %addtmp1 + ret double %multmp +} +``` + +``` +define double @test(double %x) { +entry: + %addtmp = fadd double 3.000000e+00, %x + ret double %addtmp +} +``` + +Example 2: Analytics + +Example 3: Validation + +## Out-of-source Pass + +This library is build as set of out-of-source-passes. All this means is that we will not be downloading the LLVM repository and modifying this repository directly. You can read more [here](https://llvm.org/docs/CMake.html#cmake-out-of-source-pass). + +# Getting started + ## Dependencies This library is written in C++ and depends on: @@ -92,3 +122,33 @@ You can run all processes by running: As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended that you use a docker image to perform these steps. TODO(TFR): The docker image is not added yet and this will be documented in the future. + +# Developer FAQ + +## Pass does not load + +One error that you may encounter is that an analysis pass does not load with output similar to this: + +```sh +% opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc +Failed to load passes from '../../Debug/libQSharpPasses.dylib'. Request ignored. +opt: unknown pass name 'operation-counter' +``` + +This is likely becuase you have forgotten to instantiate static class members. For instance, in the case of an instance of `llvm::AnalysisInfoMixin` you are required to have static member `Key`: + +```cpp +class COpsCounterPass : public llvm::AnalysisInfoMixin { +private: + static llvm::AnalysisKey Key; //< REQUIRED by llvm registration + friend struct llvm::AnalysisInfoMixin; +}; +``` + +If you forget to instantiate this variable in your corresponding `.cpp` file, + +```cpp +// llvm::AnalysisKey COpsCounterPass::Key; //< Uncomment this line to make everything work +``` + +everything will compile, but the pass will fail to load. There will be no linking errors either. 
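To make the fix concrete, here is a minimal, abbreviated sketch of the corresponding `.cpp` file, modelled on the `OpsCounter` pass in this library (only the analysis registration is shown; the printer registration is omitted for brevity). The single line defining `COpsCounterPass::Key` is what prevents the load failure described above:

```cpp
// Abbreviated sketch based on OpsCounter.cpp in this library.
#include "OpsCounter/OpsCounter.hpp"

#include "Llvm.hpp"

// Definition of the static analysis key declared in the header. Without this
// one line the plugin still compiles and links, but `opt` rejects the pass
// name when it tries to load the plugin.
llvm::AnalysisKey COpsCounterPass::Key;

llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo()
{
    return {LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING,
            [](llvm::PassBuilder& pb) {
                // Make the analysis available through the function analysis manager.
                pb.registerAnalysisRegistrationCallback(
                    [](llvm::FunctionAnalysisManager& fam) { fam.registerPass([] { return COpsCounterPass(); }); });
            }};
}

extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo()
{
    return GetOpsCounterPluginInfo();
}
```

Because removing the `Key` definition does not break the build, the mistake only surfaces at load time, which is why it is called out here.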
diff --git a/src/QsPasses/examples/ClassicalIrCommandline/Makefile b/src/QsPasses/examples/ClassicalIrCommandline/Makefile index b69052cb63..47609f40ac 100644 --- a/src/QsPasses/examples/ClassicalIrCommandline/Makefile +++ b/src/QsPasses/examples/ClassicalIrCommandline/Makefile @@ -6,7 +6,7 @@ emit-llvm-bc: debug-ng-pass-mac: emit-llvm-bc - opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib --passes="operation-counter" -disable-output classical-program.bc + opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -debug --passes="operation-counter" -disable-output classical-program.bc diff --git a/src/QsPasses/examples/ClassicalIrCommandline/out.txt b/src/QsPasses/examples/ClassicalIrCommandline/out.txt deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/QsPasses/site-packages/TasksCI/__main__.py b/src/QsPasses/site-packages/TasksCI/__main__.py index 425abf50fa..59d8961088 100644 --- a/src/QsPasses/site-packages/TasksCI/__main__.py +++ b/src/QsPasses/site-packages/TasksCI/__main__.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from .cli import cli diff --git a/src/QsPasses/site-packages/TasksCI/builder.py b/src/QsPasses/site-packages/TasksCI/builder.py index 329f1cbb32..73d8f7fb87 100644 --- a/src/QsPasses/site-packages/TasksCI/builder.py +++ b/src/QsPasses/site-packages/TasksCI/builder.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import os diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py index 1a9a1f3f25..62a4962ed6 100644 --- a/src/QsPasses/site-packages/TasksCI/cli.py +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/src/QsPasses/site-packages/TasksCI/formatting.py b/src/QsPasses/site-packages/TasksCI/formatting.py index 9cf87c609b..578580ba98 100644 --- a/src/QsPasses/site-packages/TasksCI/formatting.py +++ b/src/QsPasses/site-packages/TasksCI/formatting.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from os import path @@ -35,7 +35,7 @@ def require_todo_owner(filename, contents, cursor, fix_issues): def enforce_cpp_license(filename, contents, cursor, fix_issues): - return require_token("""// Copyright (c) Microsoft Corporation. All rights reserved. + return require_token("""// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. """, filename, contents, cursor, fix_issues) @@ -46,7 +46,7 @@ def enforce_py_license(filename, contents, cursor, fix_issues): if contents.strip() == "": return cursor, False - return require_token("""# Copyright (c) Microsoft Corporation. All rights reserved. + return require_token("""# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. """, filename, contents, cursor, fix_issues) diff --git a/src/QsPasses/site-packages/TasksCI/linting.py b/src/QsPasses/site-packages/TasksCI/linting.py index 6f12f49d3c..4a692ab302 100644 --- a/src/QsPasses/site-packages/TasksCI/linting.py +++ b/src/QsPasses/site-packages/TasksCI/linting.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
import logging diff --git a/src/QsPasses/site-packages/TasksCI/settings.py b/src/QsPasses/site-packages/TasksCI/settings.py index ef4ba177cd..85d5b667f2 100644 --- a/src/QsPasses/site-packages/TasksCI/settings.py +++ b/src/QsPasses/site-packages/TasksCI/settings.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from os import path diff --git a/src/QsPasses/site-packages/TasksCI/toolchain.py b/src/QsPasses/site-packages/TasksCI/toolchain.py index f666565860..5bda4c33f9 100644 --- a/src/QsPasses/site-packages/TasksCI/toolchain.py +++ b/src/QsPasses/site-packages/TasksCI/toolchain.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import shutil diff --git a/src/QsPasses/src/Llvm.hpp b/src/QsPasses/src/Llvm.hpp index cbff875717..f24aef3726 100644 --- a/src/QsPasses/src/Llvm.hpp +++ b/src/QsPasses/src/Llvm.hpp @@ -1,5 +1,5 @@ #pragma once -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #if defined(__GNUC__) diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.cpp b/src/QsPasses/src/OpsCounter/OpsCounter.cpp index 9400bbdb82..12922350ee 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.cpp @@ -1,16 +1,20 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include "OpsCounter/OpsCounter.hpp" #include "Llvm.hpp" +#include +#include using namespace llvm; +llvm::AnalysisKey COpsCounterPass::Key; llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { return { LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the printer pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { if (name == "operation-counter") @@ -20,6 +24,17 @@ llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() } return false; }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + // TODO: Fails to load if this is present + fam.registerPass([] { return COpsCounterPass(); }); + }); }}; } diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/src/OpsCounter/OpsCounter.hpp index 4095c6ad34..d4adf42781 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.hpp @@ -1,9 +1,43 @@ #pragma once -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
#include "Llvm.hpp" +class COpsCounterPass : public llvm::AnalysisInfoMixin +{ +public: + using Result = llvm::StringMap; + + Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) + { + COpsCounterPass::Result opcode_map; + + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) + { + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } + } + } + + return opcode_map; + } + +private: + static llvm::AnalysisKey Key; + friend struct llvm::AnalysisInfoMixin; +}; + class COpsCounterPrinter : public llvm::PassInfoMixin { public: @@ -11,15 +45,31 @@ class COpsCounterPrinter : public llvm::PassInfoMixin : out_stream_(out_stream) {} - // llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - auto run(llvm::Function &f, llvm::FunctionAnalysisManager & /*unused*/) + auto run(llvm::Function &function, llvm::FunctionAnalysisManager &fam) -> llvm::PreservedAnalyses // NOLINT { - out_stream_ << "(operation-counter) " << f.getName() << "\n"; - out_stream_ << "(operation-counter) number of arguments: " << f.arg_size() << "\n"; + auto &opcode_map = fam.getResult(function); + + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; + + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; + + for (auto const &instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), + instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; return llvm::PreservedAnalyses::all(); } + /* TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as unused after compilation From f09550873541649f65e69f5d4865bff9c1f85359 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 22 Jul 2021 10:41:15 +0200 Subject: [PATCH 15/48] Adding style proposal --- src/QsPasses/CONTRIBUTING.md | 8 +- .../site-packages/TasksCI/formatting.py | 4 +- src/QsPasses/src/OpsCounter/OpsCounter.cpp | 46 +++---- src/QsPasses/src/OpsCounter/OpsCounter.hpp | 117 +++++++++--------- 4 files changed, 90 insertions(+), 85 deletions(-) diff --git a/src/QsPasses/CONTRIBUTING.md b/src/QsPasses/CONTRIBUTING.md index 5cdd18327f..0b4493bb8d 100644 --- a/src/QsPasses/CONTRIBUTING.md +++ b/src/QsPasses/CONTRIBUTING.md @@ -1,6 +1,12 @@ # Contributing (Proposal - WiP) -This document is work in progress and nothing is set in stone. +This document is work in progress and nothing is set in stone. In case you do not want to feel like reading this style guide, just run + +```sh +./manage runci +``` + +from the `QsPasses` directory as all points defined in this document is automatically enforces. You can then refer to this guide for an explanation for why and how. ## Why do we need a style guide? 
diff --git a/src/QsPasses/site-packages/TasksCI/formatting.py b/src/QsPasses/site-packages/TasksCI/formatting.py index 578580ba98..e03801bc7e 100644 --- a/src/QsPasses/site-packages/TasksCI/formatting.py +++ b/src/QsPasses/site-packages/TasksCI/formatting.py @@ -35,7 +35,7 @@ def require_todo_owner(filename, contents, cursor, fix_issues): def enforce_cpp_license(filename, contents, cursor, fix_issues): - return require_token("""// Copyright (c) Microsoft Corporation. + return require_token("""// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. """, filename, contents, cursor, fix_issues) @@ -46,7 +46,7 @@ def enforce_py_license(filename, contents, cursor, fix_issues): if contents.strip() == "": return cursor, False - return require_token("""# Copyright (c) Microsoft Corporation. + return require_token("""# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. """, filename, contents, cursor, fix_issues) diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.cpp b/src/QsPasses/src/OpsCounter/OpsCounter.cpp index 12922350ee..ac1b798e4b 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.cpp @@ -12,33 +12,33 @@ llvm::AnalysisKey COpsCounterPass::Key; llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { - return { - LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the printer - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "operation-counter") - { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - return true; - } - return false; - }); + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, + [](PassBuilder& pb) + { + // Registering the printer + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) + { + if (name == "operation-counter") + { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - }); + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) + { fpm.addPass(COpsCounterPrinter(llvm::errs())); }); - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - // TODO: Fails to load if this is present - fam.registerPass([] { return COpsCounterPass(); }); - }); - }}; + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) + { fam.registerPass([] { return COpsCounterPass(); }); }); + }}; } extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return GetOpsCounterPluginInfo(); + return GetOpsCounterPluginInfo(); } diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/src/OpsCounter/OpsCounter.hpp index d4adf42781..44320aaff1 100644 --- a/src/QsPasses/src/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/src/OpsCounter/OpsCounter.hpp @@ -6,82 +6,81 @@ class COpsCounterPass : public llvm::AnalysisInfoMixin { -public: - using Result = llvm::StringMap; + public: + using Result = llvm::StringMap; - Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) - { - COpsCounterPass::Result opcode_map; - - for (auto &basic_block : function) + Result run(llvm::Function& 
function, llvm::FunctionAnalysisManager& /*unused*/) { - for (auto &instruction : basic_block) - { - auto name = instruction.getOpcodeName(); + COpsCounterPass::Result opcode_map; - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else + for (auto& basic_block : function) { - opcode_map[instruction.getOpcodeName()]++; + for (auto& instruction : basic_block) + { + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } + } } - } - } - return opcode_map; - } + return opcode_map; + } -private: - static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; + private: + static llvm::AnalysisKey Key; + friend struct llvm::AnalysisInfoMixin; }; class COpsCounterPrinter : public llvm::PassInfoMixin { -public: - explicit COpsCounterPrinter(llvm::raw_ostream &out_stream) - : out_stream_(out_stream) - {} - - auto run(llvm::Function &function, llvm::FunctionAnalysisManager &fam) - -> llvm::PreservedAnalyses // NOLINT - { - auto &opcode_map = fam.getResult(function); - - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; - - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; - - for (auto const &instruction : opcode_map) + public: + explicit COpsCounterPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), - instruction.second); } - out_stream_ << "---------------------------" - << "\n\n"; - return llvm::PreservedAnalyses::all(); - } + auto run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) -> llvm::PreservedAnalyses // NOLINT + { + auto& opcode_map = fam.getResult(function); + + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; - /* - TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as - unused after compilation - */ + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; - static bool isRequired() - { - return true; - } + for (auto const& instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; + + return llvm::PreservedAnalyses::all(); + } + + /* + TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as + unused after compilation + */ + + static bool isRequired() + { + return true; + } -private: - llvm::raw_ostream &out_stream_; + private: + llvm::raw_ostream& out_stream_; }; auto GetOpsCounterPluginInfo() -> llvm::PassPluginLibraryInfo; From b0c63d6a7329d9b2cb63686a3dc96a0e45c3673a Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 11:47:00 +0200 Subject: [PATCH 16/48] Updating documentation --- src/QsPasses/README.md | 140 ++++++++++++++++-- .../examples/ClassicalIrCommandline/Makefile | 4 +- .../classical-program.bc | Bin 2400 -> 0 bytes src/QsPasses/src/OpsCounter/OpsCounter.hpp | 5 +- 4 files changed, 133 insertions(+), 16 deletions(-) delete mode 100644 src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index e242736dbb..134ef75c0a 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -1,13 +1,22 @@ # Q# Passes for LLVM -This library defines LLVM passes used for optimising and transforming the IR. - -The Q# pass library is a dynamic library that can be compiled and ran separately from the -rest of the project code. +This library defines LLVM passes used for analysing, optimising and transforming the IR. The Q# pass library is a dynamic library that can be compiled and ran separately from the +rest of the project code. While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the QIR standard. ## What does LLVM passes do? -Example 1: Optimisation +Before getting started, we here provide a few examples of classical use cases for LLVM passes. + +**Example 1: Transformation**. As a first example of what LLVM passes can do, we look at optimisation. Consider a compiler which +compiles + +```c +double test(double x) { + return (1+2+x)*(x+(1+2)); +} +``` + +into following IR: ``` define double @test(double %x) { @@ -19,17 +28,122 @@ entry: } ``` +This code is obviously inefficient as we could get rid of one operation by rewritting the code to: + +```c +double test(double x) { + double y = 3+x; + return y * y; +} +``` + +One purpose of LLVM passes is to allow automatic transformation from the above IR to the IR: + ``` define double @test(double %x) { entry: - %addtmp = fadd double 3.000000e+00, %x - ret double %addtmp + %addtmp = fadd double %x, 3.000000e+00 + %multmp = fmul double %addtmp, %addtmp + ret double %multmp +} +``` + +**Example 2: Analytics**. Another example of useful passes are those generating and collecting statistics about the program. For instance, one analytics program +makes sense for classical programs is to count instructions used to implement functions. 
Take the C program: + +```c +int foo(int x) +{ + return x; +} + +void bar(int x, int y) +{ + foo(x + y); +} + +int main() +{ + foo(2); + bar(3, 2); + + return 0; +} +``` + +which produces follow IR (without optimisation): + +```language +define dso_local i32 @foo(i32 %0) #0 { + %2 = alloca i32, align 4 + store i32 %0, i32* %2, align 4 + %3 = load i32, i32* %2, align 4 + ret i32 %3 +} + +define dso_local void @bar(i32 %0, i32 %1) #0 { + %3 = alloca i32, align 4 + %4 = alloca i32, align 4 + store i32 %0, i32* %3, align 4 + store i32 %1, i32* %4, align 4 + %5 = load i32, i32* %3, align 4 + %6 = load i32, i32* %4, align 4 + %7 = add nsw i32 %5, %6 + %8 = call i32 @foo(i32 %7) + ret void } + +define dso_local i32 @main() #0 { + %1 = alloca i32, align 4 + store i32 0, i32* %1, align 4 + %2 = call i32 @foo(i32 2) + call void @bar(i32 3, i32 2) + ret i32 0 +} +``` + +A stat pass for this code, would collect following statisics: + +```text +Stats for 'foo' +=========================== +Opcode # Used +--------------------------- +load 1 +ret 1 +alloca 1 +store 1 +--------------------------- + +Stats for 'bar' +=========================== +Opcode # Used +--------------------------- +load 2 +add 1 +ret 1 +alloca 2 +store 2 +call 1 +--------------------------- + +Stats for 'main' +=========================== +Opcode # Used +--------------------------- +ret 1 +alloca 1 +store 1 +call 2 +--------------------------- ``` -Example 2: Analytics +**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on static arrays [2]. +Note that this is a non-standard usecase as such analysis is usually made using the AST rather than at the IR level. -Example 3: Validation +**References** +[1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass +[2] https://github.com/victor-fdez/llvm-array-check-pass ## Out-of-source Pass @@ -41,13 +155,13 @@ This library is build as set of out-of-source-passes. 
All this means is that we This library is written in C++ and depends on: -- LLVM +- LLVM Additional development dependencies include: -- CMake -- clang-format -- clang-tidy +- CMake +- clang-format +- clang-tidy ## Building the passes diff --git a/src/QsPasses/examples/ClassicalIrCommandline/Makefile b/src/QsPasses/examples/ClassicalIrCommandline/Makefile index 47609f40ac..f50340c5a5 100644 --- a/src/QsPasses/examples/ClassicalIrCommandline/Makefile +++ b/src/QsPasses/examples/ClassicalIrCommandline/Makefile @@ -1,8 +1,8 @@ emit-llvm: - clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll + clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll emit-llvm-bc: - clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc + clang -O0 -c -emit-llvm classical-program.c -o classical-program.bc debug-ng-pass-mac: emit-llvm-bc diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc b/src/QsPasses/examples/ClassicalIrCommandline/classical-program.bc deleted file mode 100644 index bb5a7848a56b2412806ec92ed5bbcb91408915ef..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2400 zcmaJ@Z%kX)6~Fc~Tm$CW0o0h~y_few)_HA{*-qz~*=T@i>vViDT0Sr-6E*~D)&P%Z zivJ|qp0V3YH#9jB(mX9y?JZJM?F&`Wln>ElY(_m~RTNE;#SntkzaipspH(FhZJ6Pj2IzAgBUazrlL?)$jjqXyjk> zeRVx*w+3XA0U@mxp*pREU_j3R;dslnj-u{-waMOkU6V9?wMySC=ziL-nqJa&Ua#v+ zn(R$Zz4^Pf_ReXqyePGFHINxo$93(cqW*&>(ev`O_y>)>GiiR#@?6Sxb?)Ldaj^(` z-xd+7htYmYUn5?6Q`dOvW)iyXWH!~*&^PKR`UpAP-xm})nmo*MNs9D>$k zM(7tXp1Aw$qyIqDs7$-o($3`3J~PFw284VESI%;4s*soXa<+a+;>vM;Ph%yrpF}NK zwc@CrOj?LTC1EcPXNZ!*BHe?g_~n4Gm#yFI5S9Z})pEAJq!HF6uK4tLkW9$L;U|id zDs;>d#f)PS91SL}c(Ibf6KE5O>s$Pq$geMOpp9zC@cFo~@>GdT!a?SZTVzrtjtr4a z81O_<382EIn#%DUpI_kr&+$amOF}YHMB-Qh6G%8s99A3&CMNYb){b*r9<0uCD9>+X z>-Rf^yhhlR`1LLB378Cy-XaQ`jsgHPc_YX~KnZx`u{q56EgZ5^&5ksgfGf};689x; zXNy~o3u}IEGb^Bo1&34Qs*F`ok|H4sjs=;Jg~%BaOA#fF{q;+lllO9LxxyT(^>=gJ zVL;f4*WbU6>>GxS&X*Y2)V-_PK`u2dh2IE--2 zN>d{w;t^z2WISHssqkonh+}>{k-?MgcqGL_Pp2!@dn%SQcwEGhmFfp7jzO9n9jvT= zXu^t$zan+B@7Y}Lci;OcFj;r@$M5cL{q}YDP3>C8Di(0Y2jG*R!}>YB z$}MhjLrt2tX^%5mnCVn;G=f9val;-m-Z!xiRrZ{rQ-Cb~ILi%Lt18^ajSjZazP^8sPV`nKy% z!nNXQooC#467D49DjhaW`tg+@{#KAE(5o~VWj}-mPd=&wwPBU?>B+bmQj$c|@fRK( zvJW=dzvtKkE7Mw#1TK4Fq4T4Y5w?okebH7`PiKlyWMWw64?W=LbO0hfchk? zO5A!}Sl@)l#Q#Mj44HeJbDnSCNVrPA(~k}DlW+t_(mmY|Q%33m;fJ&)?MwW|0{T_5|I zwPj(oL)h6mUf$w20D!mIHJ50cPn<{=oHL%*Qo((Dp|w~58{H-E{J0(is&Q~&FKPbM z5_{k`-rt1GHtt%DyQ1+QK6cc^Zd+NHezsZt=S_CU$3C*Q+&c)+B2?ZwejxFC62A!b zSmO7klSAS=t~;K#+f3_hqP5g}2ci$g#sH_6gk|_xMMZ?Z205k(UePmcXs%K0Ua=)U zbM4wET^W<*lG|qQ{8>u+wbL|#UR~9o@IWx2^o53FgTC_~v3qcG@UnMoWUyZu42uJo z1Ea5rlY>fVcx+U(+0Rl8Az=-X{m$b8PwSG_ysc{^IiV4{oA>e5mt6up=!8xyS z-AuI2GH}NyCWPpl#3lF5gzE+*A=8#gxFI1^1^Rje;^R7{TcF!2b1HAt#kB$P=$qfD z>v#dwR9?HT1-_rakHZ?R0Cv`NDvAEdnCHHuYX41dL}(DI!}BmUFtdKDi`KzfAbLSJ z5S8@;(MCslDg#6ty@&ed>p+b_M;f25gXjyH8+eL=HoA{~)6wap%0=Z=_GNh&%;|p^ zii55{*$*uG0br*8TLAO>bUrjT)*nzt0>h(6agR71eU2B;1Z4U0;F*!Yz*s0^vq3^w iAxT1^;eo*AGqN)Fni3c}JJ1J*``~mRJx-N Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/) { COpsCounterPass::Result opcode_map; - for (auto& basic_block : function) { for (auto& instruction : basic_block) { + if (instruction.isDebugOrPseudoInst()) + { + continue; + } auto name = instruction.getOpcodeName(); if (opcode_map.find(name) == opcode_map.end()) From 564f5189ee899ccb5050c836cc0412c5fdd7e1a5 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 13:47:56 +0200 Subject: [PATCH 17/48] Template based pass generator --- src/QsPasses/CMakeLists.txt | 8 +-- src/QsPasses/{src => include}/Llvm.hpp | 0 src/QsPasses/libs/CMakeLists.txt | 43 +++++++++++++++ .../{src => libs}/OpsCounter/OpsCounter.cpp | 0 .../{src => libs}/OpsCounter/OpsCounter.hpp | 0 src/QsPasses/site-packages/TasksCI/cli.py | 55 ++++++++++++++++++- .../site-packages/TasksCI/formatting.py | 6 +- .../TasksCI/templates/basic/SPECIFICATION.md | 1 + .../TasksCI/templates/basic/{name}.cpp.tpl | 41 ++++++++++++++ .../TasksCI/templates/basic/{name}.hpp.tpl | 13 +++++ 10 files changed, 157 insertions(+), 10 deletions(-) rename src/QsPasses/{src => include}/Llvm.hpp (100%) create mode 100644 src/QsPasses/libs/CMakeLists.txt rename src/QsPasses/{src => libs}/OpsCounter/OpsCounter.cpp (100%) rename src/QsPasses/{src => libs}/OpsCounter/OpsCounter.hpp (100%) create mode 100644 src/QsPasses/site-packages/TasksCI/templates/basic/SPECIFICATION.md create mode 100644 src/QsPasses/site-packages/TasksCI/templates/basic/{name}.cpp.tpl create mode 100644 src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 976649b6ab..40c9491c1f 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -22,8 +22,6 @@ link_directories(${LLVM_LIBRARY_DIRS}) add_definitions(${LLVM_DEFINITIONS}) include_directories(${CMAKE_SOURCE_DIR}/src) - - # Compiler flags set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") @@ -42,7 +40,5 @@ if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") endif() -# The main libary -add_library(QSharpPasses SHARED src/OpsCounter/OpsCounter.cpp) -target_link_libraries(QSharpPasses - "$<$:-undefined dynamic_lookup>") +add_subdirectory(libs) + diff --git a/src/QsPasses/src/Llvm.hpp b/src/QsPasses/include/Llvm.hpp similarity index 100% rename from src/QsPasses/src/Llvm.hpp rename to src/QsPasses/include/Llvm.hpp diff --git a/src/QsPasses/libs/CMakeLists.txt b/src/QsPasses/libs/CMakeLists.txt new file mode 100644 index 0000000000..578a55e711 --- /dev/null +++ b/src/QsPasses/libs/CMakeLists.txt @@ -0,0 +1,43 @@ + +macro(list_qs_passes result) + file(GLOB children RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/*) + set(dirlist "") + foreach(child ${children}) + if(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${child}) + list(APPEND dirlist ${child}) + endif() + endforeach() + set(${result} ${dirlist}) +endmacro() + +list_qs_passes(QS_PASSES) + +foreach(pass_plugin ${QS_PASSES}) + + # Getting sources + file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/${pass_plugin}/*.cpp) + + # Adding library + add_library(${pass_plugin} + SHARED + ${sources}) + + # Adding include directories + target_include_directories( + ${pass_plugin} + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}" + ) + + target_include_directories( + ${pass_plugin} + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/../include" + ) + + + # Linking + target_link_libraries(${pass_plugin} + "$<$:-undefined dynamic_lookup>") + +endforeach() diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.cpp b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp similarity index 100% rename from src/QsPasses/src/OpsCounter/OpsCounter.cpp rename to src/QsPasses/libs/OpsCounter/OpsCounter.cpp diff --git a/src/QsPasses/src/OpsCounter/OpsCounter.hpp b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp similarity index 100% rename from src/QsPasses/src/OpsCounter/OpsCounter.hpp rename 
to src/QsPasses/libs/OpsCounter/OpsCounter.hpp diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py index 62a4962ed6..09f9e9c4a1 100644 --- a/src/QsPasses/site-packages/TasksCI/cli.py +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -9,7 +9,11 @@ import click import logging import sys +import os +LIB_DIR = os.path.abspath(os.path.dirname((__file__))) +TEMPLATE_DIR = os.path.join(LIB_DIR, "templates") +SOURCE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(LIB_DIR))) logger = logging.getLogger() # Logging configuration @@ -63,7 +67,7 @@ def lint(diagnose, fix_issues, force): if fix_issues: if not force: - print("""Fixing isssues using Clang Tidy will break your code. + print("""Fixing isssues using Clang Tidy will break your code. Make sure that you have committed your changes BEFORE DOING THIS. Even so, this feature is experimental and there have been reports of clang-tidy modying system libraries - therefore, USE THIS FEATURE AT @@ -102,5 +106,54 @@ def runci(): builder_main(build_dir, None, True) +@cli.command() +@click.argument( + "name" +) +@click.option( + "--template", + default="basic", +) +def create_pass(name, template): + + target_dir = os.path.join(SOURCE_DIR, "libs", name) + if os.path.exists(target_dir): + logger.error("Pass '{}' already exists".format(name)) + exit(-1) + + if template is None: + raise BaseException("Choice is not implemented yet") + + template_dir = os.path.join(TEMPLATE_DIR, template) + if not os.path.exists(template_dir): + logger.error("Template not found") + exit(-1) + + logger.info(" ".join(["Creating", name, "in", target_dir])) + os.makedirs(target_dir) + + for root, dirs, files in os.walk(template_dir): + + # Creating dirs + for d in dirs: + os.makedirs(os.path.join(target_dir, d)) + + # Generating files + for f in files: + src = os.path.join(root, f) + dest = os.path.join(target_dir, f).format(name=name) + + with open(src, "r") as fb: + contents = fb.read() + + contents = contents.replace("{{name}}", name) + contents = contents.replace("{{name}}", name) + + # with open(dest, "w") as fb: + # fb.write(contents) + + logger.info("- Wrote {}".format(dest)) + + if __name__ == '__main__': cli() diff --git a/src/QsPasses/site-packages/TasksCI/formatting.py b/src/QsPasses/site-packages/TasksCI/formatting.py index e03801bc7e..ebe14283cc 100644 --- a/src/QsPasses/site-packages/TasksCI/formatting.py +++ b/src/QsPasses/site-packages/TasksCI/formatting.py @@ -83,10 +83,10 @@ def enforce_formatting(filename, contents, cursor, fix_issues): # Source pipeline definitions -AUTO_FORMAT_LANGUAGES = [ +SOURCE_PIPELINES = [ { "name": "C++ Main", - "src": path.join(PROJECT_ROOT, "src"), + "src": path.join(PROJECT_ROOT, "libs"), "pipelines": { "hpp": [ @@ -131,7 +131,7 @@ def execute_pipeline(pipeline, filename: str, fix_issues: bool): def main(fix_issues: bool = False): failed = False - for language in AUTO_FORMAT_LANGUAGES: + for language in SOURCE_PIPELINES: logger.info("Formatting {}".format(language["name"])) basedir = language["src"] pipelines = language["pipelines"] diff --git a/src/QsPasses/site-packages/TasksCI/templates/basic/SPECIFICATION.md b/src/QsPasses/site-packages/TasksCI/templates/basic/SPECIFICATION.md new file mode 100644 index 0000000000..f051462f55 --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/templates/basic/SPECIFICATION.md @@ -0,0 +1 @@ +# {{name}} Specification diff --git a/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.cpp.tpl 
b/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.cpp.tpl new file mode 100644 index 0000000000..c5fcb33feb --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.cpp.tpl @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "{name}/{name}.hpp" + +#include "Llvm.hpp" + +#include +#include + +llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &/*function*/, llvm::FunctionAnalysisManager &/*fam*/) +{ + // Implement the pass details here + return llvm::PreservedAnalyses::all(); +} + + +// Registering the plugin +llvm::PassPluginLibraryInfo Get{name}PluginInfo() +{ + using namespace llvm; + return { + LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "{operation_name}") + { + fpm.addPass(C{name}Pass(llvm::errs())); + return true; + } + + return false; + }); + }}; +} + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return Get{name}PluginInfo(); +} diff --git a/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl new file mode 100644 index 0000000000..dfd33eeb1d --- /dev/null +++ b/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl @@ -0,0 +1,13 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm.hpp" + +class C{name}Pass : public llvm::PassInfoMixin +{ +public: + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); +}; + +auto Get{name}PluginInfo() -> llvm::PassPluginLibraryInfo; From c80fd35725397256545182c618c0a6933c35e8fb Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 22 Jul 2021 14:16:24 +0200 Subject: [PATCH 18/48] Updating template and writing more documentation --- src/QsPasses/README.md | 46 ++++++++++++++++-- src/QsPasses/site-packages/TasksCI/cli.py | 47 ++++++++++++++++--- .../{basic => FunctionPass}/SPECIFICATION.md | 0 .../{basic => FunctionPass}/{name}.cpp.tpl | 7 ++- .../{basic => FunctionPass}/{name}.hpp.tpl | 0 5 files changed, 87 insertions(+), 13 deletions(-) rename src/QsPasses/site-packages/TasksCI/templates/{basic => FunctionPass}/SPECIFICATION.md (100%) rename src/QsPasses/site-packages/TasksCI/templates/{basic => FunctionPass}/{name}.cpp.tpl (90%) rename src/QsPasses/site-packages/TasksCI/templates/{basic => FunctionPass}/{name}.hpp.tpl (100%) diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 134ef75c0a..07edfded05 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -155,13 +155,13 @@ This library is build as set of out-of-source-passes. All this means is that we This library is written in C++ and depends on: -- LLVM +- LLVM Additional development dependencies include: -- CMake -- clang-format -- clang-tidy +- CMake +- clang-format +- clang-tidy ## Building the passes @@ -194,6 +194,44 @@ opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes= For a gentle introduction, see examples. +## Creating a new pass + +To make it easy to create a new pass, we have created a few templates to get you started quickly: + +```sh +% ./manage create-pass HelloWorld +Available templates: + +1. Function Pass + +Select a template:1 +``` + +At the moment you only have one choice which is a function pass. 
Over time we will add additional templates. Once you have instantiated your template, you are ready to build it: + +```sh +% mkdir Debug +% cd Debug +% cmake .. +-- The C compiler identification is AppleClang 12.0.5.12050022 +-- The CXX compiler identification is AppleClang 12.0.5.12050022 +(...) +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug + +% make + +[ 25%] Building CXX object libs/CMakeFiles/OpsCounter.dir/OpsCounter/OpsCounter.cpp.o +[ 50%] Linking CXX shared library libOpsCounter.dylib +[ 50%] Built target OpsCounter +[ 75%] Building CXX object libs/CMakeFiles/HelloWorld.dir/HelloWorld/HelloWorld.cpp.o +[100%] Linking CXX shared library libHelloWorld.dylib +[100%] Built target HelloWorld +``` + +Your new pass is ready to be implemented. Open `libs/HelloWorld/HelloWorld.cpp` to implement the details of the pass. At the moment, the template will not do much except for print the function names of your code. + ## CI Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py index 09f9e9c4a1..adeab1dea0 100644 --- a/src/QsPasses/site-packages/TasksCI/cli.py +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -10,6 +10,7 @@ import logging import sys import os +import re LIB_DIR = os.path.abspath(os.path.dirname((__file__))) TEMPLATE_DIR = os.path.join(LIB_DIR, "templates") @@ -112,7 +113,7 @@ def runci(): ) @click.option( "--template", - default="basic", + default=None, ) def create_pass(name, template): @@ -122,13 +123,37 @@ def create_pass(name, template): exit(-1) if template is None: - raise BaseException("Choice is not implemented yet") + # Listing options + options = [] + print("Available templates:") + print("") + for template_name in os.listdir(TEMPLATE_DIR): + if os.path.isdir(os.path.join(TEMPLATE_DIR, template_name)): + options.append(template_name) + + # Printing option + pretty_template_name = re.sub(r'(? len(options) + 1: + try: + n = input("Select a template:") + n = int(n) + except: # noqa: E722 + logger.error("Invalid choice") + exit(-1) + + # Getting the template + template = options[n - 1] template_dir = os.path.join(TEMPLATE_DIR, template) if not os.path.exists(template_dir): - logger.error("Template not found") + logger.error("Template does not exist") exit(-1) + operation_name = re.sub(r'(? /*unused*/) { if (name == "{operation_name}") { - fpm.addPass(C{name}Pass(llvm::errs())); + fpm.addPass(C{name}Pass()); return true; } diff --git a/src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl similarity index 100% rename from src/QsPasses/site-packages/TasksCI/templates/basic/{name}.hpp.tpl rename to src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl From 3078388baf241198c362c370fb9860f1a3fa527e Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 14:36:34 +0200 Subject: [PATCH 19/48] Adding introduction on how to create a pass --- src/QsPasses/README.md | 18 +++++++++++++++++- .../templates/FunctionPass/{name}.cpp.tpl | 14 +++++++++++--- .../templates/FunctionPass/{name}.hpp.tpl | 19 ++++++++++++++++++- 3 files changed, 46 insertions(+), 5 deletions(-) diff --git a/src/QsPasses/README.md b/src/QsPasses/README.md index 07edfded05..ade1b0c9f4 100644 --- a/src/QsPasses/README.md +++ b/src/QsPasses/README.md @@ -230,7 +230,23 @@ At the moment you only have one choice which is a function pass. Over time we wi [100%] Built target HelloWorld ``` -Your new pass is ready to be implemented. Open `libs/HelloWorld/HelloWorld.cpp` to implement the details of the pass. At the moment, the template will not do much except for print the function names of your code. +Your new pass is ready to be implemented. Open `libs/HelloWorld/HelloWorld.cpp` to implement the details of the pass. At the moment, the +template will not do much except for print the function names of your code. To test your new pass go to the directory `examples/ClassicalIrCommandline`, +build an IR and run the pass: + +```sh +% cd ../examples/ClassicalIrCommandline +% make +% opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll +``` + +If everything worked, you should see output like this: + +```sh +Implement your pass here: foo +Implement your pass here: bar +Implement your pass here: main +``` ## CI diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl index 61084a1482..feab1944fd 100644 --- a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl +++ b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl @@ -8,17 +8,22 @@ #include #include -llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &/*function*/, llvm::FunctionAnalysisManager &/*fam*/) +llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &function, llvm::FunctionAnalysisManager &/*fam*/) { // Pass body - llvm::errs() << "Implement your pass here\n"; + llvm::errs() << "Implement your pass here: " << function.getName() << "\n"; return llvm::PreservedAnalyses::all(); } +bool C{name}Pass::isRequired() +{ + return true; +} -// Registering the plugin +// Helper functions which we do not expose externally +namespace { llvm::PassPluginLibraryInfo Get{name}PluginInfo() { using namespace llvm; @@ -37,7 +42,10 @@ llvm::PassPluginLibraryInfo Get{name}PluginInfo() }); }}; } +} + +// Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { return Get{name}PluginInfo(); diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl index dfd33eeb1d..39e9026be4 100644 --- a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl +++ b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl @@ -7,7 +7,24 @@ class C{name}Pass : public llvm::PassInfoMixin { public: + /// Constructors and destructors + /// @{ + C{name}Pass() = default; + C{name}Pass(C{name}Pass const &) = default; + C{name}Pass(C{name}Pass &&) = default; + ~C{name}Pass() = default; + /// @} + + /// Operators + /// @{ + C{name}Pass &operator=(C{name}Pass const &) = default; + C{name}Pass &operator=(C{name}Pass &&) = default; + /// 
@} + + /// Functions required by LLVM + /// @{ llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} }; -auto Get{name}PluginInfo() -> llvm::PassPluginLibraryInfo; From e975b25779e401dcd5807aa524ca2ec0923f6172 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 22 Jul 2021 15:03:10 +0200 Subject: [PATCH 20/48] Improving code quality --- src/QsPasses/libs/OpsCounter/OpsCounter.cpp | 73 ++++++++++++- src/QsPasses/libs/OpsCounter/OpsCounter.hpp | 110 ++++++++------------ 2 files changed, 111 insertions(+), 72 deletions(-) diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp index ac1b798e4b..293e584b4d 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp @@ -8,8 +8,74 @@ #include #include using namespace llvm; -llvm::AnalysisKey COpsCounterPass::Key; +COpsCounterAnalytics::Result COpsCounterAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) +{ + COpsCounterAnalytics::Result opcode_map; + for (auto& basic_block : function) + { + for (auto& instruction : basic_block) + { + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } + } + } + + return opcode_map; +} + +COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) +{ +} + +llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) +{ + auto& opcode_map = fam.getResult(function); + + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; + + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; + + for (auto const& instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; + + return llvm::PreservedAnalyses::all(); +} + +bool COpsCounterPrinter::isRequired() +{ + return true; +} + +llvm::AnalysisKey COpsCounterAnalytics::Key; + +// Interface to plugin +namespace +{ llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { return { @@ -20,7 +86,7 @@ llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() pb.registerPipelineParsingCallback( [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) { - if (name == "operation-counter") + if (name == "print") { fpm.addPass(COpsCounterPrinter(llvm::errs())); return true; @@ -34,9 +100,10 @@ llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() // Registering the analysis module pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) - { fam.registerPass([] { return COpsCounterPass(); }); }); + { fam.registerPass([] { return COpsCounterAnalytics(); }); }); }}; } +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp index a5fb2864b8..b92c430ce5 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp @@ -4,86 +4,58 @@ #include "Llvm.hpp" 
-class COpsCounterPass : public llvm::AnalysisInfoMixin +class COpsCounterAnalytics : public llvm::AnalysisInfoMixin { public: using Result = llvm::StringMap; - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/) - { - COpsCounterPass::Result opcode_map; - for (auto& basic_block : function) - { - for (auto& instruction : basic_block) - { - if (instruction.isDebugOrPseudoInst()) - { - continue; - } - auto name = instruction.getOpcodeName(); - - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; - } - } - } - - return opcode_map; - } - + /// Constructors and destructors + /// @{ + COpsCounterAnalytics() = default; + COpsCounterAnalytics(COpsCounterAnalytics const&) = delete; + COpsCounterAnalytics(COpsCounterAnalytics&&) = default; + ~COpsCounterAnalytics() = default; + /// @} + + /// Operators + /// @{ + COpsCounterAnalytics& operator=(COpsCounterAnalytics const&) = delete; + COpsCounterAnalytics& operator=(COpsCounterAnalytics&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} private: static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; + friend struct llvm::AnalysisInfoMixin; }; class COpsCounterPrinter : public llvm::PassInfoMixin { public: - explicit COpsCounterPrinter(llvm::raw_ostream& out_stream) - : out_stream_(out_stream) - { - } - - auto run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) -> llvm::PreservedAnalyses // NOLINT - { - auto& opcode_map = fam.getResult(function); - - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; - - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; - - for (auto const& instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); - } - out_stream_ << "---------------------------" - << "\n\n"; - - return llvm::PreservedAnalyses::all(); - } - - /* - TODO(TFR): Documentation suggests that there such be a isRequired, however, comes out as - unused after compilation - */ - - static bool isRequired() - { - return true; - } - + explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); + + /// Constructors and destructors + /// @{ + COpsCounterPrinter() = delete; + COpsCounterPrinter(COpsCounterPrinter const&) = delete; + COpsCounterPrinter(COpsCounterPrinter&&) = default; + ~COpsCounterPrinter() = default; + /// @} + + /// Operators + /// @{ + COpsCounterPrinter& operator=(COpsCounterPrinter const&) = delete; + COpsCounterPrinter& operator=(COpsCounterPrinter&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} private: llvm::raw_ostream& out_stream_; }; - -auto GetOpsCounterPluginInfo() -> llvm::PassPluginLibraryInfo; From 06b09ed69b82e9dc2709fc97902fc22fd6360bda Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 15:04:05 +0200 Subject: [PATCH 21/48] Improving code quality --- src/QsPasses/libs/OpsCounter/OpsCounter.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp index b92c430ce5..0c6400f005 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp @@ -35,10 +35,9 @@ class COpsCounterAnalytics : public llvm::AnalysisInfoMixin { public: - explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); - /// Constructors and destructors /// @{ + explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); COpsCounterPrinter() = delete; COpsCounterPrinter(COpsCounterPrinter const&) = delete; COpsCounterPrinter(COpsCounterPrinter&&) = default; From fdb465a88911c0aed8975ccbc6a5a9f45bd12d2c Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Thu, 22 Jul 2021 15:20:57 +0200 Subject: [PATCH 22/48] Adding namespaces to passes --- src/QsPasses/libs/OpsCounter/OpsCounter.cpp | 100 +++++++++------- src/QsPasses/libs/OpsCounter/OpsCounter.hpp | 112 ++++++++++-------- .../templates/FunctionPass/{name}.cpp.tpl | 8 ++ .../templates/FunctionPass/{name}.hpp.tpl | 7 ++ 4 files changed, 129 insertions(+), 98 deletions(-) diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp index 293e584b4d..b2030f7c91 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.cpp @@ -9,75 +9,83 @@ #include using namespace llvm; -COpsCounterAnalytics::Result COpsCounterAnalytics::run( - llvm::Function& function, - llvm::FunctionAnalysisManager& /*unused*/) +namespace Microsoft { - COpsCounterAnalytics::Result opcode_map; - for (auto& basic_block : function) +namespace Quantum +{ + COpsCounterAnalytics::Result COpsCounterAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) { - for (auto& instruction : basic_block) + COpsCounterAnalytics::Result opcode_map; + for (auto& basic_block : function) { - if (instruction.isDebugOrPseudoInst()) + for (auto& instruction : basic_block) { - continue; - } - auto name = instruction.getOpcodeName(); + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + auto name = instruction.getOpcodeName(); - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } } } + + return opcode_map; } - return opcode_map; -} + COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) + { + } -COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream& out_stream) - : out_stream_(out_stream) -{ -} + llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) + { + auto& opcode_map = fam.getResult(function); -llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) -{ - auto& opcode_map = fam.getResult(function); + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; 
+ out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; + for (auto const& instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; - for (auto const& instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + return llvm::PreservedAnalyses::all(); } - out_stream_ << "---------------------------" - << "\n\n"; - return llvm::PreservedAnalyses::all(); -} - -bool COpsCounterPrinter::isRequired() -{ - return true; -} + bool COpsCounterPrinter::isRequired() + { + return true; + } -llvm::AnalysisKey COpsCounterAnalytics::Key; + llvm::AnalysisKey COpsCounterAnalytics::Key; +} // namespace Quantum +} // namespace Microsoft // Interface to plugin namespace { llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() { + using namespace Microsoft::Quantum; + return { LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder& pb) diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp index 0c6400f005..0662766e59 100644 --- a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp +++ b/src/QsPasses/libs/OpsCounter/OpsCounter.hpp @@ -4,57 +4,65 @@ #include "Llvm.hpp" -class COpsCounterAnalytics : public llvm::AnalysisInfoMixin +namespace Microsoft { - public: - using Result = llvm::StringMap; - - /// Constructors and destructors - /// @{ - COpsCounterAnalytics() = default; - COpsCounterAnalytics(COpsCounterAnalytics const&) = delete; - COpsCounterAnalytics(COpsCounterAnalytics&&) = default; - ~COpsCounterAnalytics() = default; - /// @} - - /// Operators - /// @{ - COpsCounterAnalytics& operator=(COpsCounterAnalytics const&) = delete; - COpsCounterAnalytics& operator=(COpsCounterAnalytics&&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); - /// @} - private: - static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; -}; - -class COpsCounterPrinter : public llvm::PassInfoMixin +namespace Quantum { - public: - /// Constructors and destructors - /// @{ - explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); - COpsCounterPrinter() = delete; - COpsCounterPrinter(COpsCounterPrinter const&) = delete; - COpsCounterPrinter(COpsCounterPrinter&&) = default; - ~COpsCounterPrinter() = default; - /// @} - - /// Operators - /// @{ - COpsCounterPrinter& operator=(COpsCounterPrinter const&) = delete; - COpsCounterPrinter& operator=(COpsCounterPrinter&&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - private: - llvm::raw_ostream& out_stream_; -}; + + class COpsCounterAnalytics : public llvm::AnalysisInfoMixin + { + public: + using Result = llvm::StringMap; + + /// Constructors and destructors + /// @{ + COpsCounterAnalytics() = default; + COpsCounterAnalytics(COpsCounterAnalytics const&) = delete; + COpsCounterAnalytics(COpsCounterAnalytics&&) = default; + ~COpsCounterAnalytics() = default; + /// @} + + /// Operators + /// @{ + COpsCounterAnalytics& 
operator=(COpsCounterAnalytics const&) = delete; + COpsCounterAnalytics& operator=(COpsCounterAnalytics&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} + private: + static llvm::AnalysisKey Key; + friend struct llvm::AnalysisInfoMixin; + }; + + class COpsCounterPrinter : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); + COpsCounterPrinter() = delete; + COpsCounterPrinter(COpsCounterPrinter const&) = delete; + COpsCounterPrinter(COpsCounterPrinter&&) = default; + ~COpsCounterPrinter() = default; + /// @} + + /// Operators + /// @{ + COpsCounterPrinter& operator=(COpsCounterPrinter const&) = delete; + COpsCounterPrinter& operator=(COpsCounterPrinter&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + private: + llvm::raw_ostream& out_stream_; + }; + +} // namespace Quantum +} // namespace Microsoft diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl index feab1944fd..7cb82d1f3d 100644 --- a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl +++ b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl @@ -8,6 +8,10 @@ #include #include +namespace Microsoft +{ +namespace Quantum +{ llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &function, llvm::FunctionAnalysisManager &/*fam*/) { // Pass body @@ -21,12 +25,16 @@ bool C{name}Pass::isRequired() { return true; } +} // namespace Quantum +} // namespace Microsoft // Helper functions which we do not expose externally namespace { llvm::PassPluginLibraryInfo Get{name}PluginInfo() { + using namespace Microsoft::Quantum; using namespace llvm; + return { LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { // Registering the pass diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl index 39e9026be4..d413f43e24 100644 --- a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl +++ b/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl @@ -4,6 +4,11 @@ #include "Llvm.hpp" +namespace Microsoft +{ +namespace Quantum +{ + class C{name}Pass : public llvm::PassInfoMixin { public: @@ -28,3 +33,5 @@ public: /// @} }; +} // namespace Quantum +} // namespace Microsoft From dd810afb797228af49594728ede8b4ab444ae2d4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Thu, 22 Jul 2021 15:53:34 +0200 Subject: [PATCH 23/48] Adding comments to the source --- src/QsPasses/CMakeLists.txt | 30 ++++++----- src/QsPasses/manage | 10 ++-- src/QsPasses/site-packages/TasksCI/builder.py | 21 +++++++- src/QsPasses/site-packages/TasksCI/cli.py | 52 +++++++++++++++++-- .../site-packages/TasksCI/formatting.py | 52 ++++++++++++++----- .../site-packages/TasksCI/toolchain.py | 8 +++ 6 files changed, 138 insertions(+), 35 deletions(-) diff --git a/src/QsPasses/CMakeLists.txt b/src/QsPasses/CMakeLists.txt index 40c9491c1f..49fb66942b 100644 --- a/src/QsPasses/CMakeLists.txt +++ b/src/QsPasses/CMakeLists.txt @@ -3,28 +3,22 @@ cmake_minimum_required(VERSION 3.4.3) project(QSharpPasses) find_package(LLVM REQUIRED CONFIG) +include(CheckCXXCompilerFlag) message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") -# Setting the standard for +# Setting the standard configuration for the C++ compiler +# Rather than allowing C++17, we restrict the ourselves to +# C++14 as this is the standard currently used by LLVM. While +# there is a very small change that the difference in standard +# would break things, it is a possibility nonetheless. set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") -# Needed for clang-tidy -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Compiler flags -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") - # LLVM is normally built without RTTI. Be consistent with that. if(NOT LLVM_ENABLE_RTTI) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") @@ -33,12 +27,22 @@ endif() # -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings # are triggered if llvm-tutor is built without this flag (though otherwise it # builds fine). For consistency, add it here too. -include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") endif() +# We export the compile commands which are needed by clang-tidy +# to run the static analysis +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +# Adding LLVM include directories. 
We may choose +# to move this to a module level at a later point +include_directories(${LLVM_INCLUDE_DIRS}) +link_directories(${LLVM_LIBRARY_DIRS}) +add_definitions(${LLVM_DEFINITIONS}) +include_directories(${CMAKE_SOURCE_DIR}/src) +# Adding the libraries add_subdirectory(libs) diff --git a/src/QsPasses/manage b/src/QsPasses/manage index ada5de795d..d49d9f2b6f 100755 --- a/src/QsPasses/manage +++ b/src/QsPasses/manage @@ -2,9 +2,11 @@ import os import sys +# Adding the site-packages directory to our Python path +# in order to access the TasksCI module ROOT = os.path.dirname(__file__) -sys.path.insert(0,os.path.join(ROOT, "site-packages")) +sys.path.insert(0, os.path.join(ROOT, "site-packages")) -from TasksCI.cli import cli - -cli() \ No newline at end of file +# Loading the CLI tool and running it +from TasksCI.cli import cli # noqa: E402 +cli() diff --git a/src/QsPasses/site-packages/TasksCI/builder.py b/src/QsPasses/site-packages/TasksCI/builder.py index 73d8f7fb87..bd6b574b04 100644 --- a/src/QsPasses/site-packages/TasksCI/builder.py +++ b/src/QsPasses/site-packages/TasksCI/builder.py @@ -3,6 +3,7 @@ import os from . import settings +from . import toolchain from .settings import PROJECT_ROOT import logging import subprocess @@ -12,6 +13,10 @@ def configure_cmake(build_dir: str, generator=None): + """ + Function that creates a build directory and runs + cmake to configure make, ninja or another generator. + """ logger.info("Source: {}".format(PROJECT_ROOT)) logger.info("Build : {}".format(build_dir)) @@ -19,7 +24,7 @@ def configure_cmake(build_dir: str, generator=None): os.chdir(PROJECT_ROOT) os.makedirs(build_dir, exist_ok=True) - cmake_cmd = ['cmake'] # TODO: get from toolchain + cmake_cmd = [toolchain.discover_cmake()] if generator is not None: cmake_cmd += ['-G', generator] @@ -33,6 +38,10 @@ def configure_cmake(build_dir: str, generator=None): def build_project(build_dir: str, generator=None, concurrency=None): + """ + Given a build directory, this function builds all targets using + a specified generator and concurrency. + """ if generator in ["make", None]: cmd = ["make"] @@ -52,7 +61,11 @@ def build_project(build_dir: str, generator=None, concurrency=None): def run_tests(build_dir: str, concurrency=None): - cmake_cmd = ['ctest'] # TODO: get from toolchain + """ + Runs the unit tests given a build directory. + """ + + cmake_cmd = [toolchain.discover_ctest()] if concurrency is not None: raise BaseException("No support for concurrent testing at the moment.") @@ -64,6 +77,10 @@ def run_tests(build_dir: str, concurrency=None): def main(build_dir: str, generator=None, test: bool = False): + """ + Runs the entire build process by first configuring, the building + and optionally testing the codebase. + """ configure_cmake(build_dir, generator) diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/QsPasses/site-packages/TasksCI/cli.py index adeab1dea0..c3984130d7 100644 --- a/src/QsPasses/site-packages/TasksCI/cli.py +++ b/src/QsPasses/site-packages/TasksCI/cli.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
- from .formatting import main as style_check_main from .builder import main as builder_main from .linting import main as lint_main, clang_tidy_diagnose @@ -12,12 +11,13 @@ import os import re +# Important directories LIB_DIR = os.path.abspath(os.path.dirname((__file__))) TEMPLATE_DIR = os.path.join(LIB_DIR, "templates") SOURCE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(LIB_DIR))) -logger = logging.getLogger() # Logging configuration +logger = logging.getLogger() ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') @@ -31,6 +31,11 @@ @click.group() @click.option('--loglevel', default="error") def cli(loglevel): + """ + Implements the general CLI options such as logging level. + """ + + # Valid values levels = { "critical": 50, "error": 40, @@ -40,6 +45,7 @@ def cli(loglevel): "notset": 0 } + # Getting the logging level and updating loglevel = loglevel.lower() if loglevel not in levels: logger.critical("Invalid log level") @@ -52,8 +58,12 @@ def cli(loglevel): @cli.command() @click.option('--fix-issues', default=False, is_flag=True) def stylecheck(fix_issues): - logger.info("Invoking style checker") + """ + Command for checking the style and optionally fixing issues. + Note that some issues are not automatically fixed. + """ + logger.info("Invoking style checker") style_check_main(fix_issues) @@ -62,10 +72,20 @@ def stylecheck(fix_issues): @click.option('--fix-issues', default=False, is_flag=True) @click.option('--force', default=False, is_flag=True) def lint(diagnose, fix_issues, force): + """ + Command for linting the code. + """ + + # Helpful option in order to diagnose Clang tidy. if diagnose: clang_tidy_diagnose() + + # In case we are diagnosing, no run is performed. return + # Allowing Clang tidy to attempt to fix issues. Generally, + # it is discouraged to use this features as it may result in + # a catastrophy if fix_issues: if not force: print("""Fixing isssues using Clang Tidy will break your code. @@ -81,6 +101,7 @@ def lint(diagnose, fix_issues, force): print("Wrong answer - stopping!") exit(-1) + # Running the linter logger.info("Invoking linter") lint_main(fix_issues) @@ -89,6 +110,10 @@ def lint(diagnose, fix_issues, force): @click.option('--debug/--no-debug', default=True) @click.option('--generator', default=None) def test(debug, generator): + """ + Command to build and test the code base. + """ + logger.info("Building and testing") build_dir = "Debug" @@ -100,6 +125,11 @@ def test(debug, generator): @cli.command() def runci(): + """ + Command to run all CI commands, starting with style check + then linting and finally unit tests. + """ + build_dir = "Debug" style_check_main(False) @@ -116,13 +146,21 @@ def runci(): default=None, ) def create_pass(name, template): + """ + Helper command to create a new pass from a template. Templates + can be found in the template directory of the TasksCI tool. + """ + # Checking whether the target already exists target_dir = os.path.join(SOURCE_DIR, "libs", name) if os.path.exists(target_dir): logger.error("Pass '{}' already exists".format(name)) exit(-1) + # In case no template was specified, we list the option + # such that the user can choose one if template is None: + # Listing options options = [] print("Available templates:") @@ -135,6 +173,7 @@ def create_pass(name, template): pretty_template_name = re.sub(r'(? 
len(options) + 1: @@ -148,15 +187,22 @@ def create_pass(name, template): # Getting the template template = options[n - 1] + # Checking that the template is valid. Note that even though + # we list the templates above, the user may have specified an + # invalid template via the command line. template_dir = os.path.join(TEMPLATE_DIR, template) if not os.path.exists(template_dir): logger.error("Template does not exist") exit(-1) + # Creating an operation name by transforming the original name + # from "CamelCase" to "camel-case" operation_name = re.sub(r'(? Date: Thu, 22 Jul 2021 23:10:06 +0200 Subject: [PATCH 24/48] Small refactor --- src/{QsPasses => Passes}/.clang-format | 0 src/{QsPasses => Passes}/.clang-tidy | 0 src/{QsPasses => Passes}/CMakeLists.txt | 0 src/{QsPasses => Passes}/CONTRIBUTING.md | 0 src/{QsPasses => Passes}/Makefile | 0 src/{QsPasses => Passes}/README.md | 0 src/{QsPasses => Passes}/docs/index.md | 0 .../examples/ClassicalIrCommandline/Makefile | 0 .../examples/ClassicalIrCommandline/README.md | 0 .../classical-program.c | 0 src/{QsPasses => Passes}/include/Llvm.hpp | 0 src/{QsPasses => Passes}/libs/CMakeLists.txt | 0 .../libs/OpsCounter/OpsCounter.cpp | 0 .../libs/OpsCounter/OpsCounter.hpp | 0 src/{QsPasses => Passes}/manage | 0 src/{QsPasses => Passes}/requirements.txt | 0 .../site-packages/TasksCI/__main__.py | 0 .../site-packages/TasksCI/builder.py | 0 .../site-packages/TasksCI/cli.py | 0 .../site-packages/TasksCI/formatting.py | 0 .../site-packages/TasksCI/linting.py | 23 ++++++++++++++++--- .../site-packages/TasksCI/settings.py | 0 .../templates/FunctionPass/SPECIFICATION.md | 0 .../templates/FunctionPass/{name}.cpp.tpl | 0 .../templates/FunctionPass/{name}.hpp.tpl | 0 .../site-packages/TasksCI/toolchain.py | 0 26 files changed, 20 insertions(+), 3 deletions(-) rename src/{QsPasses => Passes}/.clang-format (100%) rename src/{QsPasses => Passes}/.clang-tidy (100%) rename src/{QsPasses => Passes}/CMakeLists.txt (100%) rename src/{QsPasses => Passes}/CONTRIBUTING.md (100%) rename src/{QsPasses => Passes}/Makefile (100%) rename src/{QsPasses => Passes}/README.md (100%) rename src/{QsPasses => Passes}/docs/index.md (100%) rename src/{QsPasses => Passes}/examples/ClassicalIrCommandline/Makefile (100%) rename src/{QsPasses => Passes}/examples/ClassicalIrCommandline/README.md (100%) rename src/{QsPasses => Passes}/examples/ClassicalIrCommandline/classical-program.c (100%) rename src/{QsPasses => Passes}/include/Llvm.hpp (100%) rename src/{QsPasses => Passes}/libs/CMakeLists.txt (100%) rename src/{QsPasses => Passes}/libs/OpsCounter/OpsCounter.cpp (100%) rename src/{QsPasses => Passes}/libs/OpsCounter/OpsCounter.hpp (100%) rename src/{QsPasses => Passes}/manage (100%) rename src/{QsPasses => Passes}/requirements.txt (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/__main__.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/builder.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/cli.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/formatting.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/linting.py (79%) rename src/{QsPasses => Passes}/site-packages/TasksCI/settings.py (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl (100%) rename src/{QsPasses => Passes}/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl (100%) rename src/{QsPasses => 
Passes}/site-packages/TasksCI/toolchain.py (100%) diff --git a/src/QsPasses/.clang-format b/src/Passes/.clang-format similarity index 100% rename from src/QsPasses/.clang-format rename to src/Passes/.clang-format diff --git a/src/QsPasses/.clang-tidy b/src/Passes/.clang-tidy similarity index 100% rename from src/QsPasses/.clang-tidy rename to src/Passes/.clang-tidy diff --git a/src/QsPasses/CMakeLists.txt b/src/Passes/CMakeLists.txt similarity index 100% rename from src/QsPasses/CMakeLists.txt rename to src/Passes/CMakeLists.txt diff --git a/src/QsPasses/CONTRIBUTING.md b/src/Passes/CONTRIBUTING.md similarity index 100% rename from src/QsPasses/CONTRIBUTING.md rename to src/Passes/CONTRIBUTING.md diff --git a/src/QsPasses/Makefile b/src/Passes/Makefile similarity index 100% rename from src/QsPasses/Makefile rename to src/Passes/Makefile diff --git a/src/QsPasses/README.md b/src/Passes/README.md similarity index 100% rename from src/QsPasses/README.md rename to src/Passes/README.md diff --git a/src/QsPasses/docs/index.md b/src/Passes/docs/index.md similarity index 100% rename from src/QsPasses/docs/index.md rename to src/Passes/docs/index.md diff --git a/src/QsPasses/examples/ClassicalIrCommandline/Makefile b/src/Passes/examples/ClassicalIrCommandline/Makefile similarity index 100% rename from src/QsPasses/examples/ClassicalIrCommandline/Makefile rename to src/Passes/examples/ClassicalIrCommandline/Makefile diff --git a/src/QsPasses/examples/ClassicalIrCommandline/README.md b/src/Passes/examples/ClassicalIrCommandline/README.md similarity index 100% rename from src/QsPasses/examples/ClassicalIrCommandline/README.md rename to src/Passes/examples/ClassicalIrCommandline/README.md diff --git a/src/QsPasses/examples/ClassicalIrCommandline/classical-program.c b/src/Passes/examples/ClassicalIrCommandline/classical-program.c similarity index 100% rename from src/QsPasses/examples/ClassicalIrCommandline/classical-program.c rename to src/Passes/examples/ClassicalIrCommandline/classical-program.c diff --git a/src/QsPasses/include/Llvm.hpp b/src/Passes/include/Llvm.hpp similarity index 100% rename from src/QsPasses/include/Llvm.hpp rename to src/Passes/include/Llvm.hpp diff --git a/src/QsPasses/libs/CMakeLists.txt b/src/Passes/libs/CMakeLists.txt similarity index 100% rename from src/QsPasses/libs/CMakeLists.txt rename to src/Passes/libs/CMakeLists.txt diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.cpp b/src/Passes/libs/OpsCounter/OpsCounter.cpp similarity index 100% rename from src/QsPasses/libs/OpsCounter/OpsCounter.cpp rename to src/Passes/libs/OpsCounter/OpsCounter.cpp diff --git a/src/QsPasses/libs/OpsCounter/OpsCounter.hpp b/src/Passes/libs/OpsCounter/OpsCounter.hpp similarity index 100% rename from src/QsPasses/libs/OpsCounter/OpsCounter.hpp rename to src/Passes/libs/OpsCounter/OpsCounter.hpp diff --git a/src/QsPasses/manage b/src/Passes/manage similarity index 100% rename from src/QsPasses/manage rename to src/Passes/manage diff --git a/src/QsPasses/requirements.txt b/src/Passes/requirements.txt similarity index 100% rename from src/QsPasses/requirements.txt rename to src/Passes/requirements.txt diff --git a/src/QsPasses/site-packages/TasksCI/__main__.py b/src/Passes/site-packages/TasksCI/__main__.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/__main__.py rename to src/Passes/site-packages/TasksCI/__main__.py diff --git a/src/QsPasses/site-packages/TasksCI/builder.py b/src/Passes/site-packages/TasksCI/builder.py similarity index 100% rename from 
src/QsPasses/site-packages/TasksCI/builder.py rename to src/Passes/site-packages/TasksCI/builder.py diff --git a/src/QsPasses/site-packages/TasksCI/cli.py b/src/Passes/site-packages/TasksCI/cli.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/cli.py rename to src/Passes/site-packages/TasksCI/cli.py diff --git a/src/QsPasses/site-packages/TasksCI/formatting.py b/src/Passes/site-packages/TasksCI/formatting.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/formatting.py rename to src/Passes/site-packages/TasksCI/formatting.py diff --git a/src/QsPasses/site-packages/TasksCI/linting.py b/src/Passes/site-packages/TasksCI/linting.py similarity index 79% rename from src/QsPasses/site-packages/TasksCI/linting.py rename to src/Passes/site-packages/TasksCI/linting.py index 4a692ab302..659be918e6 100644 --- a/src/QsPasses/site-packages/TasksCI/linting.py +++ b/src/Passes/site-packages/TasksCI/linting.py @@ -13,12 +13,19 @@ def clang_tidy_diagnose(): + """ + Helper function to print the configuration of Clang tidy + """ + + # Getting the config config = subprocess.check_output( [toolchain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() + # Getting the list of checks check_list = subprocess.check_output( [toolchain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() + # Printing it all to the user checks = [x.strip() for x in check_list.split("\n") if '-' in x] print("Working directory: {}".format(PROJECT_ROOT)) @@ -26,11 +33,18 @@ def clang_tidy_diagnose(): print(config) print("") print("Clang tidy checks:") + for check in sorted(checks): print(" -", check) -def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): +def run_clang_tidy(build_dir, filename, fix_issues: bool = False): + """ + Function that runs Clang tidy for a single file given a build directory + and a filename. 
+ """ + + # Configuring the command line arguments clang_tidy_binary = toolchain.discover_tidy() cmd = [clang_tidy_binary] @@ -46,6 +60,7 @@ def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): cmd.append(filename) + # Getting the output p = subprocess.Popen( cmd, stdout=subprocess.PIPE, @@ -59,8 +74,10 @@ def run_clang_tidy(source_dir, build_dir, filename, fix_issues: bool = False): output = output.decode() err = err.decode() + # The return value is negative even if the user code is without + # errors, so we check whether there are any errors specified in + # error output if "error" in err: - # TODO(TFR): write output and errors to temp log file sys.stderr.write(output) sys.stderr.write(err) @@ -101,7 +118,7 @@ def main_cpp(fix_issues: bool): success = True for filename in files_to_analyse: - success = success and run_clang_tidy(source_dir, build_dir, filename, fix_issues=fix_issues) + success = success and run_clang_tidy(build_dir, filename, fix_issues=fix_issues) return success diff --git a/src/QsPasses/site-packages/TasksCI/settings.py b/src/Passes/site-packages/TasksCI/settings.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/settings.py rename to src/Passes/site-packages/TasksCI/settings.py diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md b/src/Passes/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md similarity index 100% rename from src/QsPasses/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md rename to src/Passes/site-packages/TasksCI/templates/FunctionPass/SPECIFICATION.md diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl similarity index 100% rename from src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl rename to src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl diff --git a/src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl similarity index 100% rename from src/QsPasses/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl rename to src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl diff --git a/src/QsPasses/site-packages/TasksCI/toolchain.py b/src/Passes/site-packages/TasksCI/toolchain.py similarity index 100% rename from src/QsPasses/site-packages/TasksCI/toolchain.py rename to src/Passes/site-packages/TasksCI/toolchain.py From a23b6b716588a80fd229fb4723c1a60f851f3a16 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 23 Jul 2021 11:08:52 +0200 Subject: [PATCH 25/48] Adding QIR example using opt for optimisation and refactoring library structure --- .../examples/ClassicalIrCommandline/Makefile | 6 + .../classical-program.ll | 123 ++++++++++++++ .../examples/OptimisationUsingOpt/README.md | 58 +++++++ .../SimpleExample/Makefile | 5 + .../SimpleExample/SimpleExample.csproj | 9 + .../SimpleExample/SimpleExample.qs | 10 ++ src/Passes/libs/CMakeLists.txt | 7 +- src/Passes/libs/OpsCounter/LibOpsCounter.cpp | 47 ++++++ src/Passes/libs/OpsCounter/OpsCounter.cpp | 155 +++++++----------- src/Passes/libs/OpsCounter/OpsCounter.hpp | 106 ++++++------ src/Passes/libs/OpsCounter/SPECIFICATION.md | 0 .../templates/FunctionPass/Lib{name}.cpp.tpl | 38 +++++ .../templates/FunctionPass/{name}.cpp.tpl | 31 +--- 13 files changed, 411 insertions(+), 184 deletions(-) create mode 100644 src/Passes/examples/ClassicalIrCommandline/classical-program.ll create mode 100644 src/Passes/examples/OptimisationUsingOpt/README.md create mode 100644 src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile create mode 100644 src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.csproj create mode 100644 src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.qs create mode 100644 src/Passes/libs/OpsCounter/LibOpsCounter.cpp create mode 100644 src/Passes/libs/OpsCounter/SPECIFICATION.md create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl diff --git a/src/Passes/examples/ClassicalIrCommandline/Makefile b/src/Passes/examples/ClassicalIrCommandline/Makefile index f50340c5a5..2f39e8c4a4 100644 --- a/src/Passes/examples/ClassicalIrCommandline/Makefile +++ b/src/Passes/examples/ClassicalIrCommandline/Makefile @@ -1,3 +1,9 @@ +emit-llvm-cpp: + clang -O3 -S -std=c++17 -emit-llvm classical-program.cpp -o classical-program.ll + +emit-llvm-cpp-bin: + clang++ -O3 -std=c++17 -stdlib=libc++ classical-program.cpp -o a.out + emit-llvm: clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll diff --git a/src/Passes/examples/ClassicalIrCommandline/classical-program.ll b/src/Passes/examples/ClassicalIrCommandline/classical-program.ll new file mode 100644 index 0000000000..5ad71d9d0b --- /dev/null +++ b/src/Passes/examples/ClassicalIrCommandline/classical-program.ll @@ -0,0 +1,123 @@ +; ModuleID = 'classical-program.cpp' +source_filename = "classical-program.cpp" +target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx11.0.0" + +%"class.std::__1::basic_ostream" = type { i32 (...)**, %"class.std::__1::basic_ios.base" } +%"class.std::__1::basic_ios.base" = type <{ %"class.std::__1::ios_base", %"class.std::__1::basic_ostream"*, i32 }> +%"class.std::__1::ios_base" = type { i32 (...)**, i32, i64, i64, i32, i32, i8*, i8*, void (i32, %"class.std::__1::ios_base"*, i32)**, i32*, i64, i64, i64*, i64, i64, i8**, i64, i64 } +%"class.std::__1::locale::id" = type <{ %"struct.std::__1::once_flag", i32, [4 x i8] }> +%"struct.std::__1::once_flag" = type { i64 } +%"class.std::__1::locale" = type { %"class.std::__1::locale::__imp"* } +%"class.std::__1::locale::__imp" = type opaque +%"class.std::__1::locale::facet" = type { %"class.std::__1::__shared_count" } +%"class.std::__1::__shared_count" = type { i32 (...)**, i64 } +%"class.std::__1::ctype" = type <{ %"class.std::__1::locale::facet", i32*, i8, [7 x i8] }> + +@_ZNSt3__14coutE = external global %"class.std::__1::basic_ostream", align 8 
+@_ZNSt3__15ctypeIcE2idE = external global %"class.std::__1::locale::id", align 8 + +; Function Attrs: norecurse ssp uwtable mustprogress +define dso_local i32 @main() local_unnamed_addr #0 personality i32 (...)* @__gxx_personality_v0 { + %1 = alloca %"class.std::__1::locale", align 8 + %2 = tail call i32 @_Z9fibonaccii(i32 3) + %3 = tail call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEi(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) @_ZNSt3__14coutE, i32 %2) + %4 = bitcast %"class.std::__1::basic_ostream"* %3 to i8** + %5 = load i8*, i8** %4, align 8, !tbaa !3 + %6 = getelementptr i8, i8* %5, i64 -24 + %7 = bitcast i8* %6 to i64* + %8 = load i64, i64* %7, align 8 + %9 = bitcast %"class.std::__1::basic_ostream"* %3 to i8* + %10 = getelementptr inbounds i8, i8* %9, i64 %8 + %11 = bitcast %"class.std::__1::locale"* %1 to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %11) #5 + %12 = bitcast i8* %10 to %"class.std::__1::ios_base"* + call void @_ZNKSt3__18ios_base6getlocEv(%"class.std::__1::locale"* nonnull sret(%"class.std::__1::locale") align 8 %1, %"class.std::__1::ios_base"* nonnull dereferenceable(136) %12) + %13 = invoke %"class.std::__1::locale::facet"* @_ZNKSt3__16locale9use_facetERNS0_2idE(%"class.std::__1::locale"* nonnull dereferenceable(8) %1, %"class.std::__1::locale::id"* nonnull align 8 dereferenceable(12) @_ZNSt3__15ctypeIcE2idE) + to label %14 unwind label %21 + +14: ; preds = %0 + %15 = bitcast %"class.std::__1::locale::facet"* %13 to %"class.std::__1::ctype"* + %16 = bitcast %"class.std::__1::locale::facet"* %13 to i8 (%"class.std::__1::ctype"*, i8)*** + %17 = load i8 (%"class.std::__1::ctype"*, i8)**, i8 (%"class.std::__1::ctype"*, i8)*** %16, align 8, !tbaa !3 + %18 = getelementptr inbounds i8 (%"class.std::__1::ctype"*, i8)*, i8 (%"class.std::__1::ctype"*, i8)** %17, i64 7 + %19 = load i8 (%"class.std::__1::ctype"*, i8)*, i8 (%"class.std::__1::ctype"*, i8)** %18, align 8 + %20 = invoke signext i8 %19(%"class.std::__1::ctype"* nonnull dereferenceable(25) %15, i8 signext 10) + to label %23 unwind label %21 + +21: ; preds = %14, %0 + %22 = landingpad { i8*, i32 } + cleanup + call void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8) %1) #5 + call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %11) #5 + resume { i8*, i32 } %22 + +23: ; preds = %14 + call void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8) %1) #5 + call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %11) #5 + %24 = call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE3putEc(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) %3, i8 signext %20) + %25 = call nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5flushEv(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8) %3) + ret i32 0 +} + +declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEi(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8), i32) local_unnamed_addr #1 + +; Function Attrs: ssp uwtable mustprogress +define linkonce_odr i32 @_Z9fibonaccii(i32 %0) local_unnamed_addr #2 { + %2 = icmp slt i32 %0, 2 + br i1 %2, label %13, label %3 + +3: ; preds = %1, %3 + %4 = phi i32 [ %8, %3 ], [ %0, %1 ] + %5 = phi i32 [ %9, %3 ], [ 0, %1 ] + %6 = add nsw i32 %4, -1 + 
%7 = tail call i32 @_Z9fibonaccii(i32 %6) + %8 = add nsw i32 %4, -2 + %9 = add nsw i32 %7, %5 + %10 = icmp slt i32 %4, 4 + br i1 %10, label %11, label %3 + +11: ; preds = %3 + %12 = add i32 %9, 1 + br label %13 + +13: ; preds = %11, %1 + %14 = phi i32 [ 1, %1 ], [ %12, %11 ] + ret i32 %14 +} + +declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE3putEc(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8), i8 signext) local_unnamed_addr #1 + +declare nonnull align 8 dereferenceable(8) %"class.std::__1::basic_ostream"* @_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5flushEv(%"class.std::__1::basic_ostream"* nonnull dereferenceable(8)) local_unnamed_addr #1 + +; Function Attrs: argmemonly nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #3 + +declare void @_ZNKSt3__18ios_base6getlocEv(%"class.std::__1::locale"* sret(%"class.std::__1::locale") align 8, %"class.std::__1::ios_base"* nonnull dereferenceable(136)) local_unnamed_addr #1 + +declare i32 @__gxx_personality_v0(...) + +; Function Attrs: nounwind +declare void @_ZNSt3__16localeD1Ev(%"class.std::__1::locale"* nonnull dereferenceable(8)) unnamed_addr #4 + +; Function Attrs: argmemonly nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #3 + +declare %"class.std::__1::locale::facet"* @_ZNKSt3__16locale9use_facetERNS0_2idE(%"class.std::__1::locale"* nonnull dereferenceable(8), %"class.std::__1::locale::id"* nonnull align 8 dereferenceable(12)) local_unnamed_addr #1 + +attributes #0 = { norecurse ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #2 = { ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #3 = { argmemonly nofree nosync nounwind willreturn } +attributes #4 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" 
"use-soft-float"="false" } +attributes #5 = { nounwind } + +!llvm.module.flags = !{!0, !1} +!llvm.ident = !{!2} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{!"Homebrew clang version 12.0.1"} +!3 = !{!4, !4, i64 0} +!4 = !{!"vtable pointer", !5, i64 0} +!5 = !{!"Simple C++ TBAA"} diff --git a/src/Passes/examples/OptimisationUsingOpt/README.md b/src/Passes/examples/OptimisationUsingOpt/README.md new file mode 100644 index 0000000000..741c555ea3 --- /dev/null +++ b/src/Passes/examples/OptimisationUsingOpt/README.md @@ -0,0 +1,58 @@ +# Optimisation Using Opt + +In this document, we give a brief introduction on how to perform IR optimisations +using `opt`. + +## Stripping dead code + +We start out by considering a simple case of a program that just returns 0: + +```qsharp +namespace Example { + @EntryPoint() + operation OurAwesomeQuantumProgram(nQubits : Int) : Int { + + return 0; + } +} +``` + +You find the code for this in the folder `SimpleExample`. To generate a QIR for this code, go to the folder and run + +```sh +% cd SimpleExample/ +% dotnet clean SimpleExample.csproj +(...) +% dotnet build SimpleExample.csproj -c Debug +``` + +If everything went well, you should now have a subdirectory called `qir` and inside `qir`, you will find `SimpleExample.ll`. Depending on the version of Q#, +the generated QIR will vary, but in general, it will be relatively long. Looking at this file, you will see +that the total length is a little above 2000 lines of code. That is pretty extensive for a program which essentially +does nothing so obviously, most of the generated QIR must be dead code. We can now use `opt` to get rid of the dead code and we do this by invoking: + +```sh +opt -S qir/SimpleExample.ll -O3 > qir/SimpleExample-O3.ll +``` + +All going well, this should reduce your QIR to + +```language +; Function Attrs: norecurse nounwind readnone willreturn +define i64 @Example__QuantumFunction__Interop(i64 %nQubits) local_unnamed_addr #0 { +entry: + ret i64 0 +} + +define void @Example__QuantumFunction(i64 %nQubits) local_unnamed_addr #1 { +entry: + %0 = tail call %String* @__quantum__rt__int_to_string(i64 0) + tail call void @__quantum__rt__message(%String* %0) + tail call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} +``` + +plus a few extra delcarations. + +## Applying a pass diff --git a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile new file mode 100644 index 0000000000..0de5d94b56 --- /dev/null +++ b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile @@ -0,0 +1,5 @@ +clean: + rm -rf bin + rm -rf obj + rm -rf qir + \ No newline at end of file diff --git a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.csproj b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.csproj new file mode 100644 index 0000000000..eeab572589 --- /dev/null +++ b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.csproj @@ -0,0 +1,9 @@ + + + + Exe + netcoreapp3.1 + true + + + diff --git a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.qs b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.qs new file mode 100644 index 0000000000..5578530c60 --- /dev/null +++ b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/SimpleExample.qs @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +namespace Example { + @EntryPoint() + operation QuantumFunction(nQubits : Int) : Int { + + return 0; + } +} diff --git a/src/Passes/libs/CMakeLists.txt b/src/Passes/libs/CMakeLists.txt index 578a55e711..700281c6a6 100644 --- a/src/Passes/libs/CMakeLists.txt +++ b/src/Passes/libs/CMakeLists.txt @@ -10,9 +10,9 @@ macro(list_qs_passes result) set(${result} ${dirlist}) endmacro() -list_qs_passes(QS_PASSES) +list_qs_passes(ALL_PASSES) -foreach(pass_plugin ${QS_PASSES}) +foreach(pass_plugin ${ALL_PASSES}) # Getting sources file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/${pass_plugin}/*.cpp) @@ -41,3 +41,6 @@ foreach(pass_plugin ${QS_PASSES}) "$<$:-undefined dynamic_lookup>") endforeach() + + +# add_library(passes SHARED ${ALL_PASSES}) \ No newline at end of file diff --git a/src/Passes/libs/OpsCounter/LibOpsCounter.cpp b/src/Passes/libs/OpsCounter/LibOpsCounter.cpp new file mode 100644 index 0000000000..8197658a25 --- /dev/null +++ b/src/Passes/libs/OpsCounter/LibOpsCounter.cpp @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm.hpp" +#include "OpsCounter/OpsCounter.hpp" + +#include +#include + +namespace { +// Interface to plugin +llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() +{ + using namespace Microsoft::Quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the printer + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "print") + { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(COpsCounterPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + fam.registerPass([] { return COpsCounterAnalytics(); }); + }); + }}; +} + +} // namespace + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return GetOpsCounterPluginInfo(); +} diff --git a/src/Passes/libs/OpsCounter/OpsCounter.cpp b/src/Passes/libs/OpsCounter/OpsCounter.cpp index b2030f7c91..b2df78f4b8 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.cpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.cpp @@ -7,113 +7,72 @@ #include #include -using namespace llvm; -namespace Microsoft +namespace Microsoft { +namespace Quantum { +COpsCounterAnalytics::Result COpsCounterAnalytics::run(llvm::Function &function, + llvm::FunctionAnalysisManager & /*unused*/) { -namespace Quantum -{ - COpsCounterAnalytics::Result COpsCounterAnalytics::run( - llvm::Function& function, - llvm::FunctionAnalysisManager& /*unused*/) - { - COpsCounterAnalytics::Result opcode_map; - for (auto& basic_block : function) - { - for (auto& instruction : basic_block) - { - if (instruction.isDebugOrPseudoInst()) - { - continue; - } - auto name = instruction.getOpcodeName(); - - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; - } - } - } - - return opcode_map; - } - - COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream& out_stream) - : out_stream_(out_stream) + COpsCounterAnalytics::Result opcode_map; + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) { + if 
(instruction.isDebugOrPseudoInst()) + { + continue; + } + auto name = instruction.getOpcodeName(); + + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } } + } - llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) - { - auto& opcode_map = fam.getResult(function); - - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; - - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; - - for (auto const& instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); - } - out_stream_ << "---------------------------" - << "\n\n"; - - return llvm::PreservedAnalyses::all(); - } - - bool COpsCounterPrinter::isRequired() - { - return true; - } + return opcode_map; +} - llvm::AnalysisKey COpsCounterAnalytics::Key; -} // namespace Quantum -} // namespace Microsoft +COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream &out_stream) + : out_stream_(out_stream) +{} -// Interface to plugin -namespace -{ -llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() +llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) { - using namespace Microsoft::Quantum; - - return { - LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, - [](PassBuilder& pb) - { - // Registering the printer - pb.registerPipelineParsingCallback( - [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) - { - if (name == "print") - { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) - { fpm.addPass(COpsCounterPrinter(llvm::errs())); }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) - { fam.registerPass([] { return COpsCounterAnalytics(); }); }); - }}; + auto &opcode_map = fam.getResult(function); + + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; + + constexpr auto str1 = "Opcode"; + constexpr auto str2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); + out_stream_ << "---------------------------" + << "\n"; + + for (auto const &instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), + instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; + + return llvm::PreservedAnalyses::all(); } -} // namespace -extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +bool COpsCounterPrinter::isRequired() { - return GetOpsCounterPluginInfo(); + return true; } + +llvm::AnalysisKey COpsCounterAnalytics::Key; + +} // namespace Quantum +} // namespace Microsoft diff --git a/src/Passes/libs/OpsCounter/OpsCounter.hpp b/src/Passes/libs/OpsCounter/OpsCounter.hpp index 0662766e59..4978b10725 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.hpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.hpp @@ -4,65 +4,63 @@ #include "Llvm.hpp" -namespace Microsoft -{ -namespace Quantum -{ +namespace Microsoft { +namespace Quantum { - class 
COpsCounterAnalytics : public llvm::AnalysisInfoMixin - { - public: - using Result = llvm::StringMap; +class COpsCounterAnalytics : public llvm::AnalysisInfoMixin +{ +public: + using Result = llvm::StringMap; - /// Constructors and destructors - /// @{ - COpsCounterAnalytics() = default; - COpsCounterAnalytics(COpsCounterAnalytics const&) = delete; - COpsCounterAnalytics(COpsCounterAnalytics&&) = default; - ~COpsCounterAnalytics() = default; - /// @} + /// Constructors and destructors + /// @{ + COpsCounterAnalytics() = default; + COpsCounterAnalytics(COpsCounterAnalytics const &) = delete; + COpsCounterAnalytics(COpsCounterAnalytics &&) = default; + ~COpsCounterAnalytics() = default; + /// @} - /// Operators - /// @{ - COpsCounterAnalytics& operator=(COpsCounterAnalytics const&) = delete; - COpsCounterAnalytics& operator=(COpsCounterAnalytics&&) = delete; - /// @} + /// Operators + /// @{ + COpsCounterAnalytics &operator=(COpsCounterAnalytics const &) = delete; + COpsCounterAnalytics &operator=(COpsCounterAnalytics &&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); - /// @} - private: - static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; - }; + /// Functions required by LLVM + /// @{ + Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); + /// @} +private: + static llvm::AnalysisKey Key; + friend struct llvm::AnalysisInfoMixin; +}; - class COpsCounterPrinter : public llvm::PassInfoMixin - { - public: - /// Constructors and destructors - /// @{ - explicit COpsCounterPrinter(llvm::raw_ostream& out_stream); - COpsCounterPrinter() = delete; - COpsCounterPrinter(COpsCounterPrinter const&) = delete; - COpsCounterPrinter(COpsCounterPrinter&&) = default; - ~COpsCounterPrinter() = default; - /// @} +class COpsCounterPrinter : public llvm::PassInfoMixin +{ +public: + /// Constructors and destructors + /// @{ + explicit COpsCounterPrinter(llvm::raw_ostream &out_stream); + COpsCounterPrinter() = delete; + COpsCounterPrinter(COpsCounterPrinter const &) = delete; + COpsCounterPrinter(COpsCounterPrinter &&) = default; + ~COpsCounterPrinter() = default; + /// @} - /// Operators - /// @{ - COpsCounterPrinter& operator=(COpsCounterPrinter const&) = delete; - COpsCounterPrinter& operator=(COpsCounterPrinter&&) = delete; - /// @} + /// Operators + /// @{ + COpsCounterPrinter &operator=(COpsCounterPrinter const &) = delete; + COpsCounterPrinter &operator=(COpsCounterPrinter &&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); - static bool isRequired(); - /// @} - private: - llvm::raw_ostream& out_stream_; - }; + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} +private: + llvm::raw_ostream &out_stream_; +}; -} // namespace Quantum -} // namespace Microsoft +} // namespace Quantum +} // namespace Microsoft diff --git a/src/Passes/libs/OpsCounter/SPECIFICATION.md b/src/Passes/libs/OpsCounter/SPECIFICATION.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl new file mode 100644 index 0000000000..6ed3455886 --- /dev/null +++ 
b/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "{name}/{name}.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace { +llvm::PassPluginLibraryInfo Get{name}PluginInfo() +{ + using namespace Microsoft::Quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "{operation_name}") + { + fpm.addPass(C{name}Pass()); + return true; + } + + return false; + }); + }}; +} +} + +// Interface for loading the plugin +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return Get{name}PluginInfo(); +} diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl index 7cb82d1f3d..c76a6ef22a 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl @@ -25,36 +25,7 @@ bool C{name}Pass::isRequired() { return true; } + } // namespace Quantum } // namespace Microsoft -// Helper functions which we do not expose externally -namespace { -llvm::PassPluginLibraryInfo Get{name}PluginInfo() -{ - using namespace Microsoft::Quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the pass - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "{operation_name}") - { - fpm.addPass(C{name}Pass()); - return true; - } - - return false; - }); - }}; -} -} - - -// Interface for loading the plugin -extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() -{ - return Get{name}PluginInfo(); -} From 327ff7f326830f386876eae378718f21761d6c5b Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Fri, 23 Jul 2021 11:11:29 +0200 Subject: [PATCH 26/48] Adding documentation --- src/Passes/site-packages/TasksCI/linting.py | 5 +++++ src/Passes/site-packages/TasksCI/settings.py | 4 ++++ src/Passes/site-packages/TasksCI/toolchain.py | 12 ++++++++++++ 3 files changed, 21 insertions(+) diff --git a/src/Passes/site-packages/TasksCI/linting.py b/src/Passes/site-packages/TasksCI/linting.py index 659be918e6..09b3647df4 100644 --- a/src/Passes/site-packages/TasksCI/linting.py +++ b/src/Passes/site-packages/TasksCI/linting.py @@ -89,6 +89,11 @@ def run_clang_tidy(build_dir, filename, fix_issues: bool = False): def main_cpp(fix_issues: bool): + """ + Main function for C++ linting. This function builds and lints + the code. + """ + logger.info("Linting") build_dir = os.path.join(PROJECT_ROOT, "Debug") source_dir = os.path.join(PROJECT_ROOT, "src") diff --git a/src/Passes/site-packages/TasksCI/settings.py b/src/Passes/site-packages/TasksCI/settings.py index 85d5b667f2..6db2e1fbf4 100644 --- a/src/Passes/site-packages/TasksCI/settings.py +++ b/src/Passes/site-packages/TasksCI/settings.py @@ -10,4 +10,8 @@ def get_concurrency(): + """ + Function that gives a default concurrency for the compilation + and testing process. 
+ """ return min(MAX_CONCURRENCY, multiprocessing.cpu_count()) diff --git a/src/Passes/site-packages/TasksCI/toolchain.py b/src/Passes/site-packages/TasksCI/toolchain.py index 60ceb80b4a..52757ff349 100644 --- a/src/Passes/site-packages/TasksCI/toolchain.py +++ b/src/Passes/site-packages/TasksCI/toolchain.py @@ -5,16 +5,28 @@ def discover_formatter(): + """ + Finds the clang-format executable + """ return shutil.which("clang-format") def discover_tidy(): + """ + Finds the clang-tidy executable + """ return shutil.which("clang-tidy") def discover_cmake(): + """ + Finds the cmake executable + """ return shutil.which("cmake") def discover_ctest(): + """ + Finds the ctest executable + """ return shutil.which("ctest") From 0ee8249bca9b3181176836c20bf5a46c81a7ca4d Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Fri, 23 Jul 2021 12:49:32 +0200 Subject: [PATCH 27/48] Updating linter and formatter --- src/Passes/.clang-format | 13 +- src/Passes/.clang-tidy | 40 +++++- src/Passes/libs/OpsCounter/LibOpsCounter.cpp | 63 +++++----- src/Passes/libs/OpsCounter/OpsCounter.cpp | 116 +++++++++--------- src/Passes/libs/OpsCounter/OpsCounter.hpp | 107 ++++++++-------- src/Passes/site-packages/TasksCI/linting.py | 17 +-- .../templates/FunctionPass/Lib{name}.cpp.tpl | 10 +- .../templates/FunctionPass/{name}.cpp.tpl | 12 +- .../templates/FunctionPass/{name}.hpp.tpl | 22 ++-- 9 files changed, 224 insertions(+), 176 deletions(-) diff --git a/src/Passes/.clang-format b/src/Passes/.clang-format index 329e8f956d..379a41ff3c 100644 --- a/src/Passes/.clang-format +++ b/src/Passes/.clang-format @@ -36,13 +36,20 @@ SpaceBeforeParens: ControlStatements DerivePointerAlignment: false PointerAlignment: Left -# Suggestion +# Suggestions Standard: Cpp11 AlignConsecutiveAssignments: true AlignConsecutiveDeclarations: true AlignTrailingComments: true +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 2 + +IndentCaseLabels: false +# NamespaceIndentation: None + # Ensures include compleness +IncludeBlocks: Regroup IncludeCategories: - Regex: '.*\.\..*' Priority: 1 @@ -57,4 +64,6 @@ IncludeCategories: - Regex: '.*' Priority: 2 IncludeIsMainRegex: '' - +SortIncludes: true +SortUsingDeclarations: true +SpaceInEmptyParentheses: false diff --git a/src/Passes/.clang-tidy b/src/Passes/.clang-tidy index d1f58c04c2..dfec20a924 100644 --- a/src/Passes/.clang-tidy +++ b/src/Passes/.clang-tidy @@ -26,23 +26,51 @@ HeaderFilterRegex: '.*' CheckOptions: - key: readability-identifier-naming.ClassCase value: 'CamelCase' - - key: readability-identifier-naming.ClassPrefix - value: 'C' - key: readability-identifier-naming.AbstractClassPrefix value: 'I' - key: readability-identifier-naming.StructCase value: 'CamelCase' - key: readability-identifier-naming.ParameterCase - value: 'camelBack' + value: 'lower_case' - key: readability-identifier-naming.PrivateMemberCase - value: 'camelBack' + value: 'lower_case' + - key: readability-identifier-naming.PrivateMemberSuffix + value: '_' + - key: readability-identifier-naming.ProtectedMemberCase + value: 'lower_case' + - key: readability-identifier-naming.ProtectedMemberSuffix + value: '_' + - key: readability-identifier-naming.VariableCase + value: 'lower_case' - key: readability-identifier-naming.LocalVariableCase - value: 'camelBack' + value: 'lower_case' - key: readability-identifier-naming.TypeAliasCase value: 'CamelCase' - key: readability-identifier-naming.UnionCase value: 'CamelCase' - key: readability-identifier-naming.FunctionCase - value: 'CamelCase' + 
value: 'camelBack' - key: readability-identifier-naming.NamespaceCase + value: 'lower_case' + - key: readability-identifier-naming.GlobalConstantCase + value: 'UPPER_CASE' + - key: readability-identifier-naming.EnumCase + value: 'CamelCase' + - key: readability-identifier-naming.EnumConstantCase + value: 'CamelCase' + - key: readability-identifier-naming.GlobalConstantPrefix + value: 'G_' + - key: readability-identifier-naming.ConstantCase + value: 'UPPER_CASE' + - key: readability-identifier-naming.MacroDefinitionCase + value: 'UPPER_CASE' + - key: readability-identifier-naming.TypeAliasCase + value: 'CamelCase' + - key: readability-identifier-naming.TypedefCase value: 'CamelCase' + - key: readability-identifier-naming.IgnoreMainLikeFunctions + value: true + - key: readability-identifier-naming.StaticVariableCase + value: 'lower_case' + - key: readability-identifier-naming.StaticVariablePrefix + value: 'h_' diff --git a/src/Passes/libs/OpsCounter/LibOpsCounter.cpp b/src/Passes/libs/OpsCounter/LibOpsCounter.cpp index 8197658a25..65a7a238b9 100644 --- a/src/Passes/libs/OpsCounter/LibOpsCounter.cpp +++ b/src/Passes/libs/OpsCounter/LibOpsCounter.cpp @@ -2,46 +2,49 @@ // Licensed under the MIT License. #include "Llvm.hpp" + #include "OpsCounter/OpsCounter.hpp" #include #include -namespace { +namespace +{ // Interface to plugin -llvm::PassPluginLibraryInfo GetOpsCounterPluginInfo() +llvm::PassPluginLibraryInfo getOpsCounterPluginInfo() { - using namespace Microsoft::Quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the printer - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "print") - { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(COpsCounterPrinter(llvm::errs())); - }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - fam.registerPass([] { return COpsCounterAnalytics(); }); - }); - }}; + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "OpsCounter", LLVM_VERSION_STRING, + [](PassBuilder& pb) + { + // Registering the printer + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) + { + if (name == "print") + { + fpm.addPass(OpsCounterPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) + { fpm.addPass(OpsCounterPrinter(llvm::errs())); }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager& fam) + { fam.registerPass([] { return OpsCounterAnalytics(); }); }); + }}; } -} // namespace +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return GetOpsCounterPluginInfo(); + return getOpsCounterPluginInfo(); } diff --git a/src/Passes/libs/OpsCounter/OpsCounter.cpp b/src/Passes/libs/OpsCounter/OpsCounter.cpp index b2df78f4b8..f642970b1a 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.cpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.cpp @@ -1,78 +1,80 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "OpsCounter/OpsCounter.hpp" - #include "Llvm.hpp" +#include "OpsCounter/OpsCounter.hpp" + #include #include -namespace Microsoft { -namespace Quantum { -COpsCounterAnalytics::Result COpsCounterAnalytics::run(llvm::Function &function, - llvm::FunctionAnalysisManager & /*unused*/) +namespace microsoft { - COpsCounterAnalytics::Result opcode_map; - for (auto &basic_block : function) - { - for (auto &instruction : basic_block) +namespace quantum +{ + OpsCounterAnalytics::Result OpsCounterAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) { - if (instruction.isDebugOrPseudoInst()) - { - continue; - } - auto name = instruction.getOpcodeName(); + OpsCounterAnalytics::Result opcode_map; + for (auto& basic_block : function) + { + for (auto& instruction : basic_block) + { + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + auto name = instruction.getOpcodeName(); - if (opcode_map.find(name) == opcode_map.end()) - { - opcode_map[instruction.getOpcodeName()] = 1; - } - else - { - opcode_map[instruction.getOpcodeName()]++; - } - } - } + if (opcode_map.find(name) == opcode_map.end()) + { + opcode_map[instruction.getOpcodeName()] = 1; + } + else + { + opcode_map[instruction.getOpcodeName()]++; + } + } + } - return opcode_map; -} + return opcode_map; + } -COpsCounterPrinter::COpsCounterPrinter(llvm::raw_ostream &out_stream) - : out_stream_(out_stream) -{} + OpsCounterPrinter::OpsCounterPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) + { + } -llvm::PreservedAnalyses COpsCounterPrinter::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) -{ - auto &opcode_map = fam.getResult(function); + llvm::PreservedAnalyses OpsCounterPrinter::run(llvm::Function& function, llvm::FunctionAnalysisManager& fam) + { + auto& opcode_map = fam.getResult(function); - out_stream_ << "Stats for '" << function.getName() << "'\n"; - out_stream_ << "===========================\n"; + out_stream_ << "Stats for '" << function.getName() << "'\n"; + out_stream_ << "===========================\n"; - constexpr auto str1 = "Opcode"; - constexpr auto str2 = "# Used"; - out_stream_ << llvm::format("%-15s %-8s\n", str1, str2); - out_stream_ << "---------------------------" - << "\n"; + constexpr auto STR1 = "Opcode"; + constexpr auto STR2 = "# Used"; + out_stream_ << llvm::format("%-15s %-8s\n", STR1, STR2); + out_stream_ << "---------------------------" + << "\n"; - for (auto const &instruction : opcode_map) - { - out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), - instruction.second); - } - out_stream_ << "---------------------------" - << "\n\n"; + for (auto const& instruction : opcode_map) + { + out_stream_ << llvm::format("%-15s %-8lu\n", instruction.first().str().c_str(), instruction.second); + } + out_stream_ << "---------------------------" + << "\n\n"; - return llvm::PreservedAnalyses::all(); -} + return llvm::PreservedAnalyses::all(); + } -bool COpsCounterPrinter::isRequired() -{ - return true; -} + bool OpsCounterPrinter::isRequired() + { + return true; + } -llvm::AnalysisKey COpsCounterAnalytics::Key; + llvm::AnalysisKey OpsCounterAnalytics::Key; -} // namespace Quantum -} // namespace Microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/OpsCounter/OpsCounter.hpp b/src/Passes/libs/OpsCounter/OpsCounter.hpp index 4978b10725..09f73362f1 100644 --- a/src/Passes/libs/OpsCounter/OpsCounter.hpp +++ b/src/Passes/libs/OpsCounter/OpsCounter.hpp @@ -4,63 +4,66 @@ #include "Llvm.hpp" 
-namespace Microsoft { -namespace Quantum { - -class COpsCounterAnalytics : public llvm::AnalysisInfoMixin +namespace microsoft +{ +namespace quantum { -public: - using Result = llvm::StringMap; - /// Constructors and destructors - /// @{ - COpsCounterAnalytics() = default; - COpsCounterAnalytics(COpsCounterAnalytics const &) = delete; - COpsCounterAnalytics(COpsCounterAnalytics &&) = default; - ~COpsCounterAnalytics() = default; - /// @} + class OpsCounterAnalytics : public llvm::AnalysisInfoMixin + { + public: + using Result = llvm::StringMap; - /// Operators - /// @{ - COpsCounterAnalytics &operator=(COpsCounterAnalytics const &) = delete; - COpsCounterAnalytics &operator=(COpsCounterAnalytics &&) = delete; - /// @} + /// Constructors and destructors + /// @{ + OpsCounterAnalytics() = default; + OpsCounterAnalytics(OpsCounterAnalytics const&) = delete; + OpsCounterAnalytics(OpsCounterAnalytics&&) = default; + ~OpsCounterAnalytics() = default; + /// @} - /// Functions required by LLVM - /// @{ - Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); - /// @} -private: - static llvm::AnalysisKey Key; - friend struct llvm::AnalysisInfoMixin; -}; + /// Operators + /// @{ + OpsCounterAnalytics& operator=(OpsCounterAnalytics const&) = delete; + OpsCounterAnalytics& operator=(OpsCounterAnalytics&&) = delete; + /// @} -class COpsCounterPrinter : public llvm::PassInfoMixin -{ -public: - /// Constructors and destructors - /// @{ - explicit COpsCounterPrinter(llvm::raw_ostream &out_stream); - COpsCounterPrinter() = delete; - COpsCounterPrinter(COpsCounterPrinter const &) = delete; - COpsCounterPrinter(COpsCounterPrinter &&) = default; - ~COpsCounterPrinter() = default; - /// @} + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} + + private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; + }; + + class OpsCounterPrinter : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + explicit OpsCounterPrinter(llvm::raw_ostream& out_stream); + OpsCounterPrinter() = delete; + OpsCounterPrinter(OpsCounterPrinter const&) = delete; + OpsCounterPrinter(OpsCounterPrinter&&) = default; + ~OpsCounterPrinter() = default; + /// @} - /// Operators - /// @{ - COpsCounterPrinter &operator=(COpsCounterPrinter const &) = delete; - COpsCounterPrinter &operator=(COpsCounterPrinter &&) = delete; - /// @} + /// Operators + /// @{ + OpsCounterPrinter& operator=(OpsCounterPrinter const&) = delete; + OpsCounterPrinter& operator=(OpsCounterPrinter&&) = delete; + /// @} - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} -private: - llvm::raw_ostream &out_stream_; -}; + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + private: + llvm::raw_ostream& out_stream_; + }; -} // namespace Quantum -} // namespace Microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/site-packages/TasksCI/linting.py b/src/Passes/site-packages/TasksCI/linting.py index 09b3647df4..fc672d8b20 100644 --- a/src/Passes/site-packages/TasksCI/linting.py +++ b/src/Passes/site-packages/TasksCI/linting.py @@ -50,7 +50,7 @@ def run_clang_tidy(build_dir, filename, fix_issues: bool = False): cmd = [clang_tidy_binary] 
output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) - cmd.append('-header-filter=".*(QsPasses)\\/(src).*\\.hpp$"') + cmd.append('-header-filter=".*\\/(Passes)\\/(libs)\\/.*"') cmd.append('-p=' + build_dir) cmd.append('-export-fixes={}'.format(output_file)) cmd.append('--use-color') @@ -60,19 +60,23 @@ def run_clang_tidy(build_dir, filename, fix_issues: bool = False): cmd.append(filename) + logger.info("Running '{}'".format(" ".join(cmd))) + # Getting the output p = subprocess.Popen( - cmd, + " ".join(cmd), stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, - cwd=PROJECT_ROOT) + cwd=PROJECT_ROOT, + shell=True) output, err = p.communicate() + output = output.decode() + err = err.decode() + if p.returncode != 0: - output = output.decode() - err = err.decode() # The return value is negative even if the user code is without # errors, so we check whether there are any errors specified in @@ -96,7 +100,7 @@ def main_cpp(fix_issues: bool): logger.info("Linting") build_dir = os.path.join(PROJECT_ROOT, "Debug") - source_dir = os.path.join(PROJECT_ROOT, "src") + source_dir = os.path.join(PROJECT_ROOT, "libs") generator = None extensions = ["cpp"] @@ -112,7 +116,6 @@ def main_cpp(fix_issues: bool): files_to_analyse = [] for root, dirs, files in os.walk(source_dir): - for filename in files: if "." not in filename: continue diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl index 6ed3455886..3986121e81 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/Lib{name}.cpp.tpl @@ -9,9 +9,9 @@ #include namespace { -llvm::PassPluginLibraryInfo Get{name}PluginInfo() +llvm::PassPluginLibraryInfo get{name}PluginInfo() { - using namespace Microsoft::Quantum; + using namespace microsoft::quantum; using namespace llvm; return { @@ -21,7 +21,7 @@ llvm::PassPluginLibraryInfo Get{name}PluginInfo() ArrayRef /*unused*/) { if (name == "{operation_name}") { - fpm.addPass(C{name}Pass()); + fpm.addPass({name}Pass()); return true; } @@ -29,10 +29,10 @@ llvm::PassPluginLibraryInfo Get{name}PluginInfo() }); }}; } -} +} // namespace // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return Get{name}PluginInfo(); + return get{name}PluginInfo(); } diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl index c76a6ef22a..27804b5c1c 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.cpp.tpl @@ -8,11 +8,11 @@ #include #include -namespace Microsoft +namespace microsoft { -namespace Quantum +namespace quantum { -llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &function, llvm::FunctionAnalysisManager &/*fam*/) +llvm::PreservedAnalyses {name}Pass::run(llvm::Function &function, llvm::FunctionAnalysisManager &/*fam*/) { // Pass body @@ -21,11 +21,11 @@ llvm::PreservedAnalyses C{name}Pass::run(llvm::Function &function, llvm::Functio return llvm::PreservedAnalyses::all(); } -bool C{name}Pass::isRequired() +bool {name}Pass::isRequired() { return true; } -} // namespace Quantum -} // namespace Microsoft +} // namespace quantum +} // namespace microsoft diff --git 
a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl index d413f43e24..24e5beaa7b 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionPass/{name}.hpp.tpl @@ -4,26 +4,26 @@ #include "Llvm.hpp" -namespace Microsoft +namespace microsoft { -namespace Quantum +namespace quantum { -class C{name}Pass : public llvm::PassInfoMixin +class {name}Pass : public llvm::PassInfoMixin<{name}Pass> { public: /// Constructors and destructors /// @{ - C{name}Pass() = default; - C{name}Pass(C{name}Pass const &) = default; - C{name}Pass(C{name}Pass &&) = default; - ~C{name}Pass() = default; + {name}Pass() = default; + {name}Pass({name}Pass const &) = default; + {name}Pass({name}Pass &&) = default; + ~{name}Pass() = default; /// @} /// Operators /// @{ - C{name}Pass &operator=(C{name}Pass const &) = default; - C{name}Pass &operator=(C{name}Pass &&) = default; + {name}Pass &operator=({name}Pass const &) = default; + {name}Pass &operator=({name}Pass &&) = default; /// @} /// Functions required by LLVM @@ -33,5 +33,5 @@ public: /// @} }; -} // namespace Quantum -} // namespace Microsoft +} // namespace quantum +} // namespace microsoft From 6c3c896c6ccdce57a1755fa53f6378fa92c3fa5b Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 09:43:02 +0200 Subject: [PATCH 28/48] Updating code to meet PR comments --- src/Passes/.clang-tidy | 75 +++++++++++++------ src/Passes/CMakeLists.txt | 7 +- src/Passes/Makefile | 3 + src/Passes/README.md | 27 ++++--- .../examples/ClassicalIrCommandline/Makefile | 3 - .../examples/OptimisationUsingOpt/README.md | 2 - src/Passes/manage | 2 + 7 files changed, 76 insertions(+), 43 deletions(-) diff --git a/src/Passes/.clang-tidy b/src/Passes/.clang-tidy index dfec20a924..a0f64b8d21 100644 --- a/src/Passes/.clang-tidy +++ b/src/Passes/.clang-tidy @@ -1,6 +1,7 @@ Checks: "-*,bugprone-*,\ -readability-*,\ readability-identifier-*,\ +readability-redundant-member-init,\ readability-braces-around-statements,\ cert-dcl*,\ cert-env*,\ @@ -16,6 +17,22 @@ google-runtime-operator,\ hicpp-exception-baseclass,\ hicpp-explicit-conversions,\ hicpp-use-*,\ +modernize-avoid-bind,\ +modernize-loop-convert,\ +modernize-make-shared,\ +modernize-make-unique,\ +modernize-redundant-void-arg,\ +modernize-replace-random-shuffle,\ +modernize-shrink-to-fit,\ +modernize-use-bool-literals,\ +modernize-use-default-member-init,\ +modernize-use-emplace,\ +modernize-use-equals-default,\ +modernize-use-equals-delete,\ +modernize-use-noexcept,\ +modernize-use-nullptr,\ +modernize-use-override,\ +modernize-use-transparent-functors,\ misc-*,\ -misc-misplaced-widening-cast,\ performance-*" @@ -24,14 +41,22 @@ WarningsAsErrors: '*' HeaderFilterRegex: '.*' CheckOptions: - - key: readability-identifier-naming.ClassCase - value: 'CamelCase' + # Configuration documentation: https://clang.llvm.org/extra/clang-tidy/checks/readability-identifier-naming.html + # Namespaces + - key: readability-identifier-naming.NamespaceCase + value: 'lower_case' + + # Classes and structs - key: readability-identifier-naming.AbstractClassPrefix value: 'I' + - key: readability-identifier-naming.ClassCase + value: 'CamelCase' - key: readability-identifier-naming.StructCase value: 'CamelCase' - - key: readability-identifier-naming.ParameterCase - value: 'lower_case' + - key: readability-identifier-naming.UnionCase + value: 'CamelCase' + + # Class members - 
key: readability-identifier-naming.PrivateMemberCase value: 'lower_case' - key: readability-identifier-naming.PrivateMemberSuffix @@ -40,37 +65,39 @@ CheckOptions: value: 'lower_case' - key: readability-identifier-naming.ProtectedMemberSuffix value: '_' - - key: readability-identifier-naming.VariableCase - value: 'lower_case' - - key: readability-identifier-naming.LocalVariableCase - value: 'lower_case' + + # Alias - key: readability-identifier-naming.TypeAliasCase value: 'CamelCase' - - key: readability-identifier-naming.UnionCase + - key: readability-identifier-naming.TypedefCase value: 'CamelCase' + + # Functions - key: readability-identifier-naming.FunctionCase value: 'camelBack' - - key: readability-identifier-naming.NamespaceCase + - key: readability-identifier-naming.IgnoreMainLikeFunctions + value: true + + # Variables and parameters + - key: readability-identifier-naming.VariableCase + value: 'lower_case' + - key: readability-identifier-naming.LocalVariableCase + value: 'lower_case' + - key: readability-identifier-naming.ParameterCase value: 'lower_case' + + # Globals, consts and enums - key: readability-identifier-naming.GlobalConstantCase value: 'UPPER_CASE' - - key: readability-identifier-naming.EnumCase - value: 'CamelCase' - - key: readability-identifier-naming.EnumConstantCase - value: 'CamelCase' - key: readability-identifier-naming.GlobalConstantPrefix value: 'G_' - key: readability-identifier-naming.ConstantCase value: 'UPPER_CASE' - - key: readability-identifier-naming.MacroDefinitionCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.TypeAliasCase + - key: readability-identifier-naming.EnumCase value: 'CamelCase' - - key: readability-identifier-naming.TypedefCase + - key: readability-identifier-naming.EnumConstantCase value: 'CamelCase' - - key: readability-identifier-naming.IgnoreMainLikeFunctions - value: true - - key: readability-identifier-naming.StaticVariableCase - value: 'lower_case' - - key: readability-identifier-naming.StaticVariablePrefix - value: 'h_' + + # Macros + - key: readability-identifier-naming.MacroDefinitionCase + value: 'UPPER_CASE' diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index 49fb66942b..0a55f495dd 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -9,9 +9,10 @@ message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") # Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict the ourselves to -# C++14 as this is the standard currently used by LLVM. While -# there is a very small change that the difference in standard +# Rather than allowing C++17, we restrict ourselves to +# C++14 as this is the standard currently used by the LLVM +# project for compilation of the framework. While there is +# a very small chance that the difference in standard # would break things, it is a possibility nonetheless. set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) diff --git a/src/Passes/Makefile b/src/Passes/Makefile index e039211bb9..919e20dc8a 100644 --- a/src/Passes/Makefile +++ b/src/Passes/Makefile @@ -1,3 +1,6 @@ +nothing: + @echo "Preventing the user from accidently running the clean command." 
+ clean: rm -rf Release/ rm -rf Debug/ diff --git a/src/Passes/README.md b/src/Passes/README.md index ade1b0c9f4..e8107da1e8 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -1,13 +1,13 @@ # Q# Passes for LLVM -This library defines LLVM passes used for analysing, optimising and transforming the IR. The Q# pass library is a dynamic library that can be compiled and ran separately from the -rest of the project code. While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the QIR standard. +This library defines [LLVM passes](https://llvm.org/docs/Passes.html) used for analysing, optimising and transforming the IR. The Q# pass library is a dynamic library that can be compiled and ran separately from the +rest of the project code. While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the [QIR specification](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR). -## What does LLVM passes do? +## What do LLVM passes do? -Before getting started, we here provide a few examples of classical use cases for LLVM passes. +Before getting started, we here provide a few examples of classical use cases for [LLVM passes](https://llvm.org/docs/Passes.html). You find additional [instructive examples here][1]. -**Example 1: Transformation**. As a first example of what LLVM passes can do, we look at optimisation. Consider a compiler which +**Example 1: Transformation**. As a first example of what [LLVM passes](https://llvm.org/docs/Passes.html) can do, we look at optimisation. Consider a compiler which compiles ```c @@ -37,7 +37,7 @@ double test(double x) { } ``` -One purpose of LLVM passes is to allow automatic transformation from the above IR to the IR: +One purpose of [LLVM passes](https://llvm.org/docs/Passes.html) is to allow automatic transformation from the above IR to the IR: ``` define double @test(double %x) { @@ -138,12 +138,13 @@ call 2 --------------------------- ``` -**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on static arrays [2]. +**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on [static arrays][2]. Note that this is a non-standard usecase as such analysis is usually made using the AST rather than at the IR level. **References** -[1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass -[2] https://github.com/victor-fdez/llvm-array-check-pass + +- [1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass +- [2] https://github.com/victor-fdez/llvm-array-check-pass ## Out-of-source Pass @@ -184,12 +185,16 @@ and then make your target make [target] ``` +Valid targets are the name of the folders in `libs/` found in the passes root. + ## Running a pass -You can run a pass using `opt` as follows: +You can run a pass using [opt](https://llvm.org/docs/CommandGuide/opt.html) as follows: ```sh -opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc +cd examples/ClassicalIrCommandline +make emit-llvm-bc +opt -load-pass-plugin ../../{Debug,Release}/libOpsCounter.{dylib,so} --passes="print" -disable-output classical-program.bc ``` For a gentle introduction, see examples. 
diff --git a/src/Passes/examples/ClassicalIrCommandline/Makefile b/src/Passes/examples/ClassicalIrCommandline/Makefile index 2f39e8c4a4..64a96c1266 100644 --- a/src/Passes/examples/ClassicalIrCommandline/Makefile +++ b/src/Passes/examples/ClassicalIrCommandline/Makefile @@ -1,9 +1,6 @@ emit-llvm-cpp: clang -O3 -S -std=c++17 -emit-llvm classical-program.cpp -o classical-program.ll -emit-llvm-cpp-bin: - clang++ -O3 -std=c++17 -stdlib=libc++ classical-program.cpp -o a.out - emit-llvm: clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll diff --git a/src/Passes/examples/OptimisationUsingOpt/README.md b/src/Passes/examples/OptimisationUsingOpt/README.md index 741c555ea3..7f84f1b2d5 100644 --- a/src/Passes/examples/OptimisationUsingOpt/README.md +++ b/src/Passes/examples/OptimisationUsingOpt/README.md @@ -54,5 +54,3 @@ entry: ``` plus a few extra delcarations. - -## Applying a pass diff --git a/src/Passes/manage b/src/Passes/manage index d49d9f2b6f..4da1f9bd64 100755 --- a/src/Passes/manage +++ b/src/Passes/manage @@ -9,4 +9,6 @@ sys.path.insert(0, os.path.join(ROOT, "site-packages")) # Loading the CLI tool and running it from TasksCI.cli import cli # noqa: E402 + +# Running the CLI tool defined in site-packages/TasksCI/cli cli() From df3e4d2f162063584a7c212a9da6199778ebad7e Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 10:19:31 +0200 Subject: [PATCH 29/48] Adding function analysis template --- .../FunctionAnalysis/Lib{name}.cpp.tpl | 47 ++++++++++++++ .../FunctionAnalysis/SPECIFICATION.md | 0 .../templates/FunctionAnalysis/{name}.cpp.tpl | 41 ++++++++++++ .../templates/FunctionAnalysis/{name}.hpp.tpl | 64 +++++++++++++++++++ 4 files changed, 152 insertions(+) create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/SPECIFICATION.md create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl create mode 100644 src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl new file mode 100644 index 0000000000..92121f04bf --- /dev/null +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm.hpp" +#include "{name}/{name}.hpp" + +#include +#include + +namespace { +// Interface to plugin +llvm::PassPluginLibraryInfo get{name}PluginInfo() +{ + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the printer + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "{operation-name}") + { + fpm.addPass({name}Pass()); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass({name}Pass()); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + fam.registerPass([] { return {name}Analytics(); }); + }); + }}; +} + +} // namespace + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return get{name}PluginInfo(); +} diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/SPECIFICATION.md b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/SPECIFICATION.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl new file mode 100644 index 0000000000..3345c92eb1 --- /dev/null +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "{name}/{name}.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +{name}Analytics::Result {name}Analytics::run(llvm::Function &/*function*/, + llvm::FunctionAnalysisManager & /*unused*/) +{ + {name}Analytics::Result result; + + // Collect analytics here + + return result; +} + +llvm::PreservedAnalyses {name}Pass::run(llvm::Function & /*function*/, + llvm::FunctionAnalysisManager & /*fam*/) +{ + // auto &results = fam.getResult<{name}Analytics>(function); + + // Use analytics here + + return llvm::PreservedAnalyses::all(); +} + +bool {name}Pass::isRequired() +{ + return true; +} + +llvm::AnalysisKey {name}Analytics::Key; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl new file mode 100644 index 0000000000..9c13246f07 --- /dev/null +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl @@ -0,0 +1,64 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "Llvm.hpp" + +namespace microsoft { +namespace quantum { + +class {name}Analytics : public llvm::AnalysisInfoMixin<{name}Analytics> +{ +public: + using Result = llvm::StringMap; ///< Change the type of the collected date here + + /// Constructors and destructors + /// @{ + {name}Analytics() = default; + {name}Analytics({name}Analytics const &) = delete; + {name}Analytics({name}Analytics &&) = default; + ~{name}Analytics() = default; + /// @} + + /// Operators + /// @{ + {name}Analytics &operator=({name}Analytics const &) = delete; + {name}Analytics &operator=({name}Analytics &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function & function, llvm::FunctionAnalysisManager & /*unused*/); + /// @} + +private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin<{name}Analytics>; +}; + +class {name}Pass : public llvm::PassInfoMixin<{name}Pass> +{ +public: + /// Constructors and destructors + /// @{ + {name}Pass() = default; + {name}Pass({name}Pass const &) = delete; + {name}Pass({name}Pass &&) = default; + ~{name}Pass() = default; + /// @} + + /// Operators + /// @{ + {name}Pass &operator=({name}Pass const &) = delete; + {name}Pass &operator=({name}Pass &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function & function, llvm::FunctionAnalysisManager & fam); + static bool isRequired(); + /// @} +}; + +} // namespace quantum +} // namespace microsoft From 62e64cb892761ce613fee0cbbb800783a2732d43 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 10:30:02 +0200 Subject: [PATCH 30/48] Update of template --- .../FunctionAnalysis/Lib{name}.cpp.tpl | 8 ++++---- .../templates/FunctionAnalysis/{name}.cpp.tpl | 11 +++++++++-- .../templates/FunctionAnalysis/{name}.hpp.tpl | 17 ++++++++++------- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl index 92121f04bf..1f55809910 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl @@ -16,12 +16,12 @@ llvm::PassPluginLibraryInfo get{name}PluginInfo() return { LLVM_PLUGIN_API_VERSION, "{name}", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the printer + // Registering a printer for the anaylsis pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { - if (name == "{operation-name}") + if (name == "print<{operation-name}>") { - fpm.addPass({name}Pass()); + fpm.addPass({name}Printer(llvm::errs())); return true; } return false; @@ -29,7 +29,7 @@ llvm::PassPluginLibraryInfo get{name}PluginInfo() pb.registerVectorizerStartEPCallback( [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass({name}Pass()); + fpm.addPass({name}Printer(llvm::errs())); }); // Registering the analysis module diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl index 3345c92eb1..1d0eea61b4 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.cpp.tpl @@ -20,17 +20,24 @@ namespace quantum { return result; } -llvm::PreservedAnalyses 
{name}Pass::run(llvm::Function & /*function*/, + +{name}Printer::{name}Printer(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) +{ +} + +llvm::PreservedAnalyses {name}Printer::run(llvm::Function & /*function*/, llvm::FunctionAnalysisManager & /*fam*/) { // auto &results = fam.getResult<{name}Analytics>(function); // Use analytics here + out_stream_ << "Analysis results are printed using this stream\n"; return llvm::PreservedAnalyses::all(); } -bool {name}Pass::isRequired() +bool {name}Printer::isRequired() { return true; } diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl index 9c13246f07..5dd8e664d9 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/{name}.hpp.tpl @@ -36,21 +36,22 @@ private: friend struct llvm::AnalysisInfoMixin<{name}Analytics>; }; -class {name}Pass : public llvm::PassInfoMixin<{name}Pass> +class {name}Printer : public llvm::PassInfoMixin<{name}Printer> { public: /// Constructors and destructors /// @{ - {name}Pass() = default; - {name}Pass({name}Pass const &) = delete; - {name}Pass({name}Pass &&) = default; - ~{name}Pass() = default; + explicit {name}Printer(llvm::raw_ostream& out_stream); + {name}Printer() = delete; + {name}Printer({name}Printer const &) = delete; + {name}Printer({name}Printer &&) = default; + ~{name}Printer() = default; /// @} /// Operators /// @{ - {name}Pass &operator=({name}Pass const &) = delete; - {name}Pass &operator=({name}Pass &&) = delete; + {name}Printer &operator=({name}Printer const &) = delete; + {name}Printer &operator=({name}Printer &&) = delete; /// @} /// Functions required by LLVM @@ -58,6 +59,8 @@ public: llvm::PreservedAnalyses run(llvm::Function & function, llvm::FunctionAnalysisManager & fam); static bool isRequired(); /// @} +private: + llvm::raw_ostream& out_stream_; }; } // namespace quantum From f0c47f2adc4d489b58a5f76ee6de4640009458e9 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 10:31:42 +0200 Subject: [PATCH 31/48] Adding boiler plate for const size array analysis --- .../ConstSizeArrayAnalysis.cpp | 48 +++++++++++++ .../ConstSizeArrayAnalysis.hpp | 67 +++++++++++++++++++ .../LibConstSizeArrayAnalysis.cpp | 47 +++++++++++++ .../ConstSizeArrayAnalysis/SPECIFICATION.md | 0 4 files changed, 162 insertions(+) create mode 100644 src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp create mode 100644 src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp create mode 100644 src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp create mode 100644 src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp new file mode 100644 index 0000000000..1859544c02 --- /dev/null +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run(llvm::Function &/*function*/, + llvm::FunctionAnalysisManager & /*unused*/) +{ + ConstSizeArrayAnalysisAnalytics::Result result; + + // Collect analytics here + + return result; +} + + +ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) +{ +} + +llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & /*function*/, + llvm::FunctionAnalysisManager & /*fam*/) +{ + // auto &results = fam.getResult<ConstSizeArrayAnalysisAnalytics>(function); + + // Use analytics here + out_stream_ << "Analysis results are printed using this stream\n"; + + return llvm::PreservedAnalyses::all(); +} + +bool ConstSizeArrayAnalysisPrinter::isRequired() +{ + return true; +} + +llvm::AnalysisKey ConstSizeArrayAnalysisAnalytics::Key; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp new file mode 100644 index 0000000000..f063161039 --- /dev/null +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp @@ -0,0 +1,67 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm.hpp" + +namespace microsoft { +namespace quantum { + +class ConstSizeArrayAnalysisAnalytics : public llvm::AnalysisInfoMixin<ConstSizeArrayAnalysisAnalytics> +{ +public: + using Result = llvm::StringMap; ///< Change the type of the collected data here + + /// Constructors and destructors + /// @{ + ConstSizeArrayAnalysisAnalytics() = default; + ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics const &) = delete; + ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics &&) = default; + ~ConstSizeArrayAnalysisAnalytics() = default; + /// @} + + /// Operators + /// @{ + ConstSizeArrayAnalysisAnalytics &operator=(ConstSizeArrayAnalysisAnalytics const &) = delete; + ConstSizeArrayAnalysisAnalytics &operator=(ConstSizeArrayAnalysisAnalytics &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function & function, llvm::FunctionAnalysisManager & /*unused*/); + /// @} + +private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin<ConstSizeArrayAnalysisAnalytics>; +}; + +class ConstSizeArrayAnalysisPrinter : public llvm::PassInfoMixin<ConstSizeArrayAnalysisPrinter> +{ +public: + /// Constructors and destructors + /// @{ + explicit ConstSizeArrayAnalysisPrinter(llvm::raw_ostream& out_stream); + ConstSizeArrayAnalysisPrinter() = delete; + ConstSizeArrayAnalysisPrinter(ConstSizeArrayAnalysisPrinter const &) = delete; + ConstSizeArrayAnalysisPrinter(ConstSizeArrayAnalysisPrinter &&) = default; + ~ConstSizeArrayAnalysisPrinter() = default; + /// @} + + /// Operators + /// @{ + ConstSizeArrayAnalysisPrinter &operator=(ConstSizeArrayAnalysisPrinter const &) = delete; + ConstSizeArrayAnalysisPrinter &operator=(ConstSizeArrayAnalysisPrinter &&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function & function, llvm::FunctionAnalysisManager & fam); + static bool isRequired(); + /// @} +private: + llvm::raw_ostream& out_stream_; +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp
b/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp new file mode 100644 index 0000000000..c77715b8c3 --- /dev/null +++ b/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "Llvm.hpp" +#include "ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp" + +#include +#include + +namespace { +// Interface to plugin +llvm::PassPluginLibraryInfo getConstSizeArrayAnalysisPluginInfo() +{ + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "ConstSizeArrayAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering a printer for the analysis + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef<PassBuilder::PipelineElement> /*unused*/) { + if (name == "print<{operation-name}>") + { + fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { + fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); + }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { + fam.registerPass([] { return ConstSizeArrayAnalysisAnalytics(); }); + }); + }}; +} + +} // namespace + +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return getConstSizeArrayAnalysisPluginInfo(); +} diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md b/src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md new file mode 100644 index 0000000000..e69de29bb2 From 49fe60156df3de81e1a63ccb2cb94b3e2a1e443c Mon Sep 17 00:00:00 2001 From: "Troels F.
Roennow" Date: Mon, 26 Jul 2021 12:16:45 +0200 Subject: [PATCH 32/48] Adding constant allocation identification --- .../examples/ClassicalIrCommandline/README.md | 2 +- .../ConstSizeArray/ConstSizeArray.csproj | 9 + .../ConstSizeArray/ConstSizeArray.qs | 16 + .../ConstSizeArray/Makefile | 6 + .../ConstSizeArray/qir/ConstSizeArray-O3.ll | 50 + .../ConstSizeArray/qir/ConstSizeArray.ll | 2039 +++++++++++++++++ .../examples/ConstSizeArrayAnalysis/Makefile | 5 + .../examples/ConstSizeArrayAnalysis/README.md | 74 + .../analysis-problem.ll | 50 + .../ConstSizeArrayAnalysis.cpp | 79 +- .../ConstSizeArrayAnalysis.hpp | 29 +- .../LibConstSizeArrayAnalysis.cpp | 2 +- .../FunctionAnalysis/Lib{name}.cpp.tpl | 2 +- 13 files changed, 2335 insertions(+), 28 deletions(-) create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/Makefile create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/README.md create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll diff --git a/src/Passes/examples/ClassicalIrCommandline/README.md b/src/Passes/examples/ClassicalIrCommandline/README.md index b293fc6b5c..1227fdbe70 100644 --- a/src/Passes/examples/ClassicalIrCommandline/README.md +++ b/src/Passes/examples/ClassicalIrCommandline/README.md @@ -32,5 +32,5 @@ opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-cou This part assumes that you have built the QsPasses library. ```sh -opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc +opt -load-pass-plugin ../../{Debug,Release}/libs/libQSharpPasses.{dylib,so} --passes="print<operation-counter>" -disable-output classical-program.bc ``` diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj new file mode 100644 index 0000000000..eeab572589 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj @@ -0,0 +1,9 @@ + + + + Exe + netcoreapp3.1 + true + + + diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs new file mode 100644 index 0000000000..7d46c1a7af --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +namespace Example { + @EntryPoint() + operation Main() : Int + { + return QuantumFunction(10); + } + + operation QuantumFunction(nQubits : Int) : Int { + use qubits = Qubit[nQubits]; + + return 0; + } +} diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile new file mode 100644 index 0000000000..db141e9e19 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile @@ -0,0 +1,6 @@ + +clean: + rm -rf bin + rm -rf obj + rm -rf qir + \ No newline at end of file diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll new file mode 100644 index 0000000000..01315ee268 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll @@ -0,0 +1,50 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumFunction__body() + ret void +} + +define internal fastcc void @Example__QuantumFunction__body() unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 10) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll new file mode 100644 index 0000000000..9279113efd --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll @@ -0,0 +1,2039 @@ + +%Range = type { i64, i64, i64 } +%Tuple = type opaque +%Array = type opaque +%Qubit = type opaque +%String = type opaque +%Callable = type opaque +%Result = type opaque + +@PauliI = internal constant i2 0 +@PauliX = internal constant i2 1 +@PauliY = internal constant i2 -1 +@PauliZ = internal constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } +@PartialApplication__1 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, 
%Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__1 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__3 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] +@MemoryManagement__2 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] +@PartialApplication__4 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] +@PartialApplication__5 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] +@PartialApplication__6 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] +@PartialApplication__7 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__8 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__9 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] +@PartialApplication__10 = internal constant [4 x void (%Tuple*, %Tuple*, 
%Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@PartialApplication__11 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] +@PartialApplication__12 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] + +define internal i64 @Example__Main__body() { +entry: + %0 = call i64 @Example__QuantumFunction__body(i64 10) + ret i64 %0 +} + +define internal i64 @Example__QuantumFunction__body(i64 %nQubits) { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret i64 0 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +define internal { %String* }* @Microsoft__Quantum__Diagnostics__EnableTestingViaName__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %__Item1__, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) + ret { %String* }* %1 +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +define internal { %String* }* @Microsoft__Quantum__Diagnostics__Test__body(%String* %ExecutionTarget) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %ExecutionTarget, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %ExecutionTarget, i32 1) + ret { %String* }* %1 +} + +define internal %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +entry: + ret %Tuple* null +} + +define internal { %String* }* @Microsoft__Quantum__Core__Deprecated__body(%String* %NewName) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + 
store %String* %NewName, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %NewName, i32 1) + ret { %String* }* %1 +} + +define internal %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +entry: + ret %Tuple* null +} + +define internal %Tuple* @Microsoft__Quantum__Core__Inline__body() { +entry: + ret %Tuple* null +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare void @__quantum__qis__applyconditionallyintrinsic__body(%Array*, %Array*, %Callable*, %Callable*) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* 
%measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call 
void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %10) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define 
internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr 
inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, 
%Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = 
load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + 
call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = 
getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctladj(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResults = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %resultsValues = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onEqualOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) + %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %onNonEqualOp = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), 
i64 2)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %9, %Callable** %7, align 8 + store %Array* %ctls, %Array** %8, align 8 + %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %14) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %14, %Callable** %12, align 8 + store %Array* %ctls, %Array** %13, align 8 + %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) + call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* 
}* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { 
%Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + 
call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to 
i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +declare void @__quantum__qis__applyifelseintrinsic__body(%Result*, %Callable*, %Callable*) + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, 
i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) 
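; Note (editorial annotation, not part of the patch): at this point the adjoint copies created above have been released; the remaining calls undo the alias-count increments taken on the original onResultZeroOp and onResultOneOp callables at function entry before returning.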
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { 
%Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* 
%onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* 
%3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 
false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 
2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctladj(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %measurementResult = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %onResultZeroOp = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) + %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %onResultOneOp = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %8, %Callable** %6, align 8 + store %Array* %ctls, %Array** %7, align 8 + %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Callable* %13, %Callable** %11, align 8 + store %Array* %ctls, %Array** %12, align 8 + %onResultOneOp__1 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) + call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* 
%4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* + %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + store %Tuple* null, %Tuple** %6, align 8 + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %9) + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* + %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Tuple*, %Tuple** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* + %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Tuple* %4, %Tuple** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal { %String*, %String* }* @Microsoft__Quantum__Targeting__RequiresCapability__body(%String* %Level, %String* %Reason) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %1 = bitcast %Tuple* %0 to { %String*, %String* }* + %2 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 1 + store %String* %Level, %String** %2, 
align 8 + store %String* %Reason, %String** %3, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %Level, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %Reason, i32 1) + ret { %String*, %String* }* %1 +} + +define internal { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 + store %String* %__Item1__, %String** %2, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) + ret { %String* }* %1 +} + +define i64 @Example__Main__Interop() #0 { +entry: + %0 = call i64 @Example__Main__body() + ret i64 %0 +} + +define void @Example__Main() #1 { +entry: + %0 = call i64 @Example__Main__body() + %1 = call %String* @__quantum__rt__int_to_string(i64 %0) + call void @__quantum__rt__message(%String* %1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) + +declare %String* @__quantum__rt__int_to_string(i64) + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile new file mode 100644 index 0000000000..fa5dd88fb0 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile @@ -0,0 +1,5 @@ +run: build + opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll + +build: + pushd ../../ && mkdir -p Debug && cd Debug && cmake .. 
&& make ConstSizeArrayAnalysis && popd || popd diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/README.md b/src/Passes/examples/ConstSizeArrayAnalysis/README.md new file mode 100644 index 0000000000..3d662c5b9f --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/README.md @@ -0,0 +1,74 @@ +# ConstSizeArray + +## Running the analysis + +Ensure that you have built the latest version of the pass + +```sh +% make build +``` + +```sh +% opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll +``` + +## Generating an example QIR + +Build the QIR + +```sh +cd ConstSizeArray +dotnet build ConstSizeArray.csproj +``` + +Strip it of unnecessary information + +```sh +opt -S qir/ConstSizeArray.ll -O1 > qir/Problem.ll +``` + +The result should be similar to + +``` +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + %qubits.i.i = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 10) + tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 1) + tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 -1) + tail call void @__quantum__rt__qubit_release_array(%Array* %qubits.i.i) + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + %qubits.i.i = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 10) + tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 1) + tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 -1) + tail call void @__quantum__rt__qubit_release_array(%Array* %qubits.i.i) + %0 = tail call %String* @__quantum__rt__int_to_string(i64 0) + tail call void @__quantum__rt__message(%String* %0) + tail call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } +``` diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll new file mode 100644 index 0000000000..01315ee268 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll @@ -0,0 +1,50 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumFunction__body() + ret void +} + +define internal fastcc void @Example__QuantumFunction__body() unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 10) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* 
@__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp index 1859544c02..37c455961b 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp @@ -10,29 +10,86 @@ namespace microsoft { namespace quantum { -ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run(llvm::Function &/*function*/, - llvm::FunctionAnalysisManager & /*unused*/) +ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run( + llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) { ConstSizeArrayAnalysisAnalytics::Result result; // Collect analytics here + // Use analytics here + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) + { + // Skipping debug code + if (instruction.isDebugOrPseudoInst()) + { + continue; + } + + // Checking if it is a call instruction + auto *call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + continue; + } + + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); + + // TODO(tfr): Find a better way to inject runtime symbols + if (name != "__quantum__rt__qubit_allocate_array") + { + continue; + } + + // Validating that there exactly one argument + if (call_instr->arg_size() != 1) + { + continue; + } + + // Getting the size of the argument + auto size_value = call_instr->getArgOperand(0); + if (size_value == nullptr) + { + continue; + } + + // Checking if the value is constant + auto cst = llvm::dyn_cast(size_value); + if (cst == nullptr) + { + continue; + } + + result[name] = cst->getValue().getSExtValue(); + } + } + return result; } - -ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream& out_stream) +ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream &out_stream) : out_stream_(out_stream) -{ -} +{} -llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & /*function*/, - llvm::FunctionAnalysisManager & /*fam*/) +llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) { - // auto &results = fam.getResult(function); + auto &results = fam.getResult(function); - // Use analytics here - out_stream_ << "Analysis results are printed using this stream\n"; + if (!results.empty()) + { + out_stream_ << function.getName() << "\n"; + out_stream_ << "====================" + 
<< "\n\n"; + for (auto const &size_info : results) + { + out_stream_ << size_info.first() << ": " << size_info.second << "\n"; + } + } return llvm::PreservedAnalyses::all(); } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp index f063161039..b30abcf9b9 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp @@ -7,17 +7,18 @@ namespace microsoft { namespace quantum { -class ConstSizeArrayAnalysisAnalytics : public llvm::AnalysisInfoMixin +class ConstSizeArrayAnalysisAnalytics + : public llvm::AnalysisInfoMixin { public: - using Result = llvm::StringMap; ///< Change the type of the collected date here + using Result = llvm::StringMap; ///< Change the type of the collected date here /// Constructors and destructors /// @{ - ConstSizeArrayAnalysisAnalytics() = default; - ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics const &) = delete; - ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics &&) = default; - ~ConstSizeArrayAnalysisAnalytics() = default; + ConstSizeArrayAnalysisAnalytics() = default; + ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics const &) = delete; + ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics &&) = default; + ~ConstSizeArrayAnalysisAnalytics() = default; /// @} /// Operators @@ -28,7 +29,7 @@ class ConstSizeArrayAnalysisAnalytics : public llvm::AnalysisInfoMixin /*unused*/) { - if (name == "print<{operation-name}>") + if (name == "print") { fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); return true; diff --git a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl index 1f55809910..beb4589fe1 100644 --- a/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl +++ b/src/Passes/site-packages/TasksCI/templates/FunctionAnalysis/Lib{name}.cpp.tpl @@ -19,7 +19,7 @@ llvm::PassPluginLibraryInfo get{name}PluginInfo() // Registering a printer for the anaylsis pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { - if (name == "print<{operation-name}>") + if (name == "print<{operation_name}>") { fpm.addPass({name}Printer(llvm::errs())); return true; From cc1a9791cd7d1e2e617400b4d3083489c2ecc9e4 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 26 Jul 2021 12:17:52 +0200 Subject: [PATCH 33/48] Removing garbage --- .../ConstSizeArray/qir/ConstSizeArray-O3.ll | 50 - .../ConstSizeArray/qir/ConstSizeArray.ll | 2039 ----------------- 2 files changed, 2089 deletions(-) delete mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll delete mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll deleted file mode 100644 index 01315ee268..0000000000 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray-O3.ll +++ /dev/null @@ -1,50 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumFunction__body() - ret void -} - -define internal fastcc void @Example__QuantumFunction__body() unnamed_addr { -entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 10) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll deleted file mode 100644 index 9279113efd..0000000000 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/qir/ConstSizeArray.ll +++ /dev/null @@ -1,2039 +0,0 @@ - -%Range = type { i64, i64, i64 } -%Tuple = type opaque -%Array = type opaque -%Qubit = type opaque -%String = type opaque -%Callable = type opaque -%Result = type opaque - -@PauliI = internal constant i2 0 -@PauliX = internal constant i2 1 -@PauliY = internal constant i2 -1 -@PauliZ = internal constant i2 -2 -@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -@PartialApplication__1 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@MemoryManagement__1 = internal constant 
[2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] -@PartialApplication__2 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__3 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] -@MemoryManagement__2 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] -@PartialApplication__4 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] -@PartialApplication__5 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] -@PartialApplication__6 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] -@PartialApplication__7 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__8 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__9 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] -@PartialApplication__10 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] -@PartialApplication__11 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] -@PartialApplication__12 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] - -define internal i64 @Example__Main__body() { -entry: - %0 = call i64 @Example__QuantumFunction__body(i64 10) - ret i64 %0 -} - -define internal i64 @Example__QuantumFunction__body(i64 %nQubits) { -entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret i64 0 -} - -declare %Qubit* @__quantum__rt__qubit_allocate() - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) - -declare void @__quantum__rt__qubit_release_array(%Array*) - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) - -define internal { %String* }* @Microsoft__Quantum__Diagnostics__EnableTestingViaName__body(%String* %__Item1__) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %__Item1__, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) - ret { %String* }* %1 -} - -declare %Tuple* @__quantum__rt__tuple_create(i64) - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) - -define internal { %String* }* @Microsoft__Quantum__Diagnostics__Test__body(%String* %ExecutionTarget) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %ExecutionTarget, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %ExecutionTarget, i32 1) - ret { %String* }* %1 -} - -define internal %Tuple* @Microsoft__Quantum__Core__Attribute__body() { -entry: - ret %Tuple* null -} - -define internal { %String* }* @Microsoft__Quantum__Core__Deprecated__body(%String* %NewName) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %NewName, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %NewName, i32 1) - 
ret { %String* }* %1 -} - -define internal %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { -entry: - ret %Tuple* null -} - -define internal %Tuple* @Microsoft__Quantum__Core__Inline__body() { -entry: - ret %Tuple* null -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) - -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) - -declare void @__quantum__qis__applyconditionallyintrinsic__body(%Array*, %Array*, %Callable*, %Callable*) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) - -declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) - -declare void @__quantum__rt__callable_make_adjoint(%Callable*) - -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void 
@__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) - -declare void @__quantum__rt__callable_make_controlled(%Callable*) - -declare void @__quantum__rt__array_update_reference_count(%Array*, i32) - -define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void 
@MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, 
align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) - -declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void 
@Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void 
@__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void 
@__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctladj(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = 
bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %14) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void 
@__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* 
}* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { 
%Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - 
call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to 
i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -declare void @__quantum__qis__applyifelseintrinsic__body(%Result*, %Callable*, %Callable*) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, 
i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) 
- call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { 
%Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void 
@Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* 
%onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* 
%3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 
false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 
2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctladj(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %8) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %13) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* 
%4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal { %String*, %String* }* @Microsoft__Quantum__Targeting__RequiresCapability__body(%String* %Level, %String* %Reason) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %1 = bitcast %Tuple* %0 to { %String*, %String* }* - %2 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 1 - store %String* %Level, %String** %2, 
align 8 - store %String* %Reason, %String** %3, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %Level, i32 1) - call void @__quantum__rt__string_update_reference_count(%String* %Reason, i32 1) - ret { %String*, %String* }* %1 -} - -define internal { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %__Item1__, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) - ret { %String* }* %1 -} - -define i64 @Example__Main__Interop() #0 { -entry: - %0 = call i64 @Example__Main__body() - ret i64 %0 -} - -define void @Example__Main() #1 { -entry: - %0 = call i64 @Example__Main__body() - %1 = call %String* @__quantum__rt__int_to_string(i64 %0) - call void @__quantum__rt__message(%String* %1) - call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) - -declare %String* @__quantum__rt__int_to_string(i64) - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } From 7319eb435417b8ddceb640e9e6acf9f257a51626 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Mon, 26 Jul 2021 12:33:05 +0200 Subject: [PATCH 34/48] Adding slightly harder example --- .../ConstSizeArray/ConstSizeArray.qs | 2 +- .../examples/ConstSizeArrayAnalysis/Makefile | 4 +- ...lysis-problem.ll => analysis-problem-1.ll} | 0 .../analysis-problem-2.ll | 51 +++++++++++++++++++ 4 files changed, 54 insertions(+), 3 deletions(-) rename src/Passes/examples/ConstSizeArrayAnalysis/{analysis-problem.ll => analysis-problem-1.ll} (100%) create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs index 7d46c1a7af..53b9e4cc36 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -5,7 +5,7 @@ namespace Example { @EntryPoint() operation Main() : Int { - return QuantumFunction(10); + return QuantumFunction(10) + QuantumFunction(3); } operation QuantumFunction(nQubits : Int) : Int { diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile index fa5dd88fb0..bf0b756b86 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile +++ b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile @@ -1,5 +1,5 @@ run: build - opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll - + opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem-1.ll + build: pushd ../../ && mkdir -p Debug && cd Debug && cmake .. 
&& make ConstSizeArrayAnalysis && popd || popd diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-1.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem.ll rename to src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-1.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll new file mode 100644 index 0000000000..14b286b182 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll @@ -0,0 +1,51 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumFunction__body(i64 10) + call fastcc void @Example__QuantumFunction__body(i64 3) + ret void +} + +define internal fastcc void @Example__QuantumFunction__body(i64 %nQubits) unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } From c051ee678b4fe2a1caea9a68c3145cbf3c8e56de Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Mon, 26 Jul 2021 14:00:21 +0200 Subject: [PATCH 35/48] Updating with additional example --- .../ConstSizeArray/ConstSizeArray.qs | 8 ++++---- .../examples/ConstSizeArrayAnalysis/analysis-problem-2.ll | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs index 53b9e4cc36..e59bf41637 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -5,12 +5,12 @@ namespace Example { @EntryPoint() operation Main() : Int { - return QuantumFunction(10) + QuantumFunction(3); + QuantumFunction(3); + QuantumFunction(10) ; + return 0; } - operation QuantumFunction(nQubits : Int) : Int { + operation QuantumFunction(nQubits : Int) : Unit { use qubits = Qubit[nQubits]; - - return 0; } } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll index 14b286b182..2577a9de2f 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll @@ -6,8 +6,8 @@ source_filename = "qir/ConstSizeArray.ll" define internal fastcc void @Example__Main__body() unnamed_addr { entry: - call fastcc void @Example__QuantumFunction__body(i64 10) call fastcc void @Example__QuantumFunction__body(i64 3) + call fastcc void @Example__QuantumFunction__body(i64 10) ret void } From 6f34ee8f54b27db1db707110749caeba920f7478 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 12:37:37 +0200 Subject: [PATCH 36/48] Adding static analysis for qubit allocation --- src/Passes/.clang-tidy | 26 +- .../ConstSizeArray/Comparison.cpp | 18 ++ .../ConstSizeArray/ConstSizeArray.qs | 25 +- .../ConstSizeArray/Makefile | 3 +- .../ConstSizeArray/comparison.ll | 75 +++++ .../examples/ConstSizeArrayAnalysis/Makefile | 4 +- .../examples/ConstSizeArrayAnalysis/README.md | 86 ++++++ .../analysis-problem-2.ll | 8 +- .../analysis-problem-3.ll | 52 ++++ .../analysis-problem-4.ll | 80 ++++++ .../ConstSizeArrayAnalysis.cpp | 263 +++++++++++++++--- .../ConstSizeArrayAnalysis.hpp | 44 ++- 12 files changed, 613 insertions(+), 71 deletions(-) create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll create mode 100644 src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll diff --git a/src/Passes/.clang-tidy b/src/Passes/.clang-tidy index a0f64b8d21..7260427b82 100644 --- a/src/Passes/.clang-tidy +++ b/src/Passes/.clang-tidy @@ -66,11 +66,23 @@ CheckOptions: - key: readability-identifier-naming.ProtectedMemberSuffix value: '_' - # Alias + # Type Alias and Enum Types / constants - key: readability-identifier-naming.TypeAliasCase value: 'CamelCase' - key: readability-identifier-naming.TypedefCase value: 'CamelCase' + - key: readability-identifier-naming.EnumCase + value: 'CamelCase' + - key: readability-identifier-naming.EnumConstantCase + value: 'CamelCase' + + # Globals, consts and enums + - key: readability-identifier-naming.GlobalConstantCase + value: 'UPPER_CASE' + - key: readability-identifier-naming.GlobalConstantPrefix + value: 'G_' + - key: 
readability-identifier-naming.ConstantCase + value: 'UPPER_CASE' # Functions - key: readability-identifier-naming.FunctionCase @@ -86,18 +98,6 @@ CheckOptions: - key: readability-identifier-naming.ParameterCase value: 'lower_case' - # Globals, consts and enums - - key: readability-identifier-naming.GlobalConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.GlobalConstantPrefix - value: 'G_' - - key: readability-identifier-naming.ConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.EnumCase - value: 'CamelCase' - - key: readability-identifier-naming.EnumConstantCase - value: 'CamelCase' - # Macros - key: readability-identifier-naming.MacroDefinitionCase value: 'UPPER_CASE' diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp new file mode 100644 index 0000000000..00f2b75f5f --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp @@ -0,0 +1,18 @@ +#include + +void QuantumFunction(int32_t nQubits) +{ + volatile uint64_t x = 3; + for (uint64_t i = 0; i < x; ++i) + { + nQubits += nQubits; + } + int32_t qubits[nQubits]; +} + +int main() +{ + QuantumFunction(10); + QuantumFunction(3); + return 0; +} \ No newline at end of file diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs index e59bf41637..ae895648ed 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,16 +1,25 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- namespace Example { + @EntryPoint() operation Main() : Int { - QuantumFunction(3); - QuantumFunction(10) ; + QuantumProgram(3,2,1); + QuantumProgram(4,9,4); return 0; } - operation QuantumFunction(nQubits : Int) : Unit { - use qubits = Qubit[nQubits]; + function X(value: Int): Int + { + return 3 * value; + } + + operation QuantumProgram(x: Int, h: Int, g: Int) : Unit { + let z = x * (x + 1) - 47; + let y = 3 * x; + + use qubits1 = Qubit[(y - 2)/2-z]; + use qubits2 = Qubit[y - g]; + use qubits3 = Qubit[h]; + use qubits4 = Qubit[X(x)]; } -} +} \ No newline at end of file diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile index db141e9e19..fa97ab5b45 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile @@ -1,4 +1,5 @@ - +comparison: + clang++ -S -emit-llvm -std=c++14 -stdlib=libc++ Comparison.cpp -o comparison.ll clean: rm -rf bin rm -rf obj diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll new file mode 100644 index 0000000000..0e2c5308a3 --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll @@ -0,0 +1,75 @@ +; ModuleID = 'Comparison.cpp' +source_filename = "Comparison.cpp" +target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx11.0.0" + +; Function Attrs: noinline nounwind optnone ssp uwtable mustprogress +define dso_local void @_Z15QuantumFunctioni(i32 %0) #0 { + %2 = alloca i32, align 4 + %3 = alloca i64, align 8 + %4 = alloca i64, align 8 + %5 = alloca i8*, align 8 + %6 = alloca i64, align 8 + store i32 %0, i32* %2, align 4 + store volatile i64 3, i64* %3, align 8 + store i64 0, i64* %4, align 8 + br label %7 + +7: ; preds = %15, %1 + %8 = load i64, i64* %4, align 8 + %9 = load volatile i64, i64* %3, align 8 + %10 = icmp ult i64 %8, %9 + br i1 %10, label %11, label %18 + +11: ; preds = %7 + %12 = load i32, i32* %2, align 4 + %13 = load i32, i32* %2, align 4 + %14 = add nsw i32 %13, %12 + store i32 %14, i32* %2, align 4 + br label %15 + +15: ; preds = %11 + %16 = load i64, i64* %4, align 8 + %17 = add i64 %16, 1 + store i64 %17, i64* %4, align 8 + br label %7, !llvm.loop !3 + +18: ; preds = %7 + %19 = load i32, i32* %2, align 4 + %20 = zext i32 %19 to i64 + %21 = call i8* @llvm.stacksave() + store i8* %21, i8** %5, align 8 + %22 = alloca i32, i64 %20, align 16 + store i64 %20, i64* %6, align 8 + %23 = load i8*, i8** %5, align 8 + call void @llvm.stackrestore(i8* %23) + ret void +} + +; Function Attrs: nofree nosync nounwind willreturn +declare i8* @llvm.stacksave() #1 + +; Function Attrs: nofree nosync nounwind willreturn +declare void @llvm.stackrestore(i8*) #1 + +; Function Attrs: noinline norecurse nounwind optnone ssp uwtable mustprogress +define dso_local i32 @main() #2 { + %1 = alloca i32, align 4 + store i32 0, i32* %1, align 4 + call void @_Z15QuantumFunctioni(i32 10) + call void @_Z15QuantumFunctioni(i32 3) + ret i32 0 +} + +attributes #0 = { noinline nounwind optnone ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" 
"stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nofree nosync nounwind willreturn } +attributes #2 = { noinline norecurse nounwind optnone ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } + +!llvm.module.flags = !{!0, !1} +!llvm.ident = !{!2} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{!"Homebrew clang version 12.0.1"} +!3 = distinct !{!3, !4} +!4 = !{!"llvm.loop.mustprogress"} diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile index bf0b756b86..4050079dfe 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile +++ b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile @@ -1,5 +1,7 @@ run: build - opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem-1.ll +# opt -load-pass-plugin ../../Debug/libs/libMetaData.dylib --passes="meta-data" -S analysis-problem-4.ll + opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem-4.ll build: pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make ConstSizeArrayAnalysis && popd || popd + pushd ../../ && mkdir -p Debug && cd Debug && cmake .. 
&& make MetaData && popd || popd diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/README.md b/src/Passes/examples/ConstSizeArrayAnalysis/README.md index 3d662c5b9f..fba3828ece 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/README.md +++ b/src/Passes/examples/ConstSizeArrayAnalysis/README.md @@ -12,6 +12,88 @@ Ensure that you have build the latest version of the pass % opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll ``` +## Cases to consider + +```qsharp +namespace Example { + + @EntryPoint() + operation Main() : Int + { + QuantumProgram(); + return 0; + } + + + + operation QuantumProgram(x: Int) : Unit { + use qubits = Qubit[3]; + } + +} +``` + +```qsharp +namespace Example { + + @EntryPoint() + operation Main() : Int + { + QuantumProgram(3); + QuantumProgram(4); + return 0; + } + + + + operation QuantumProgram(x: Int) : Unit { + use qubits = Qubit[x]; + } + +} +``` + +```qsharp +namespace Example { + + @EntryPoint() + operation Main() : Int + { + QuantumProgram(3); + QuantumProgram(4); + return 0; + } + + + + operation QuantumProgram(x: Int) : Unit { + use qubits = Qubit[x * x]; + } + +} +``` + +```qsharp +namespace Example { + + @EntryPoint() + operation Main() : Int + { + QuantumProgram(ComputeNumberOfQubits); + return 0; + } + + function ComputeNumberOfQubits(x: Int): Int { + return x * x; + } + + operation QuantumProgram(fnc : Int -> Int) : Unit { + use qubits = Qubit[fnc(3)]; + } + +} +``` + ## Generating an example QIR Build the QIR @@ -72,3 +154,7 @@ declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr attributes #0 = { "InteropFriendly" } attributes #1 = { "EntryPoint" } ``` + +# Notes + +To make a proper version of Const Size deduction, look at the [constant folding](https://llvm.org/doxygen/ConstantFolding_8cpp_source.html) implementation and in particular, the [target library](https://llvm.org/doxygen/classllvm_1_1TargetLibraryInfo.html) which essentially promotes information about the runtime. 
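As a rough illustration of that note (not part of the patch series itself), the hand-rolled opcode switch in `ConstSizeArrayAnalysis` could in principle defer to LLVM's own folding machinery. The sketch below only uses existing LLVM APIs (`ConstantFoldInstruction`, `TargetLibraryAnalysis`, `DataLayout`); the helper name `foldIfConstant` and its wiring are invented for the example and are not how the pass in this patch is implemented.

```cpp
// Illustrative sketch, assuming an LLVM new-pass-manager context.
// ConstantFoldInstruction, TargetLibraryAnalysis and DataLayout are existing
// LLVM APIs; the helper name foldIfConstant is made up for this example.
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Returns the folded constant if the instruction can be evaluated at compile
// time from constant operands, and nullptr otherwise.
Constant *foldIfConstant(Instruction &instruction, FunctionAnalysisManager &fam)
{
  Function &function     = *instruction.getFunction();
  auto const &layout     = function.getParent()->getDataLayout();
  auto &library_info     = fam.getResult<TargetLibraryAnalysis>(function);

  // ConstantFoldInstruction inspects the operands and, when they are all
  // constants, evaluates the instruction without enumerating opcodes by hand.
  return ConstantFoldInstruction(&instruction, layout, &library_info);
}
```

Note that plain constant folding only covers values that are literally constant; the analysis added in this patch additionally tracks values that depend solely on function arguments, which folding alone cannot express.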
diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll index 2577a9de2f..ea4ead0400 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll @@ -6,14 +6,14 @@ source_filename = "qir/ConstSizeArray.ll" define internal fastcc void @Example__Main__body() unnamed_addr { entry: - call fastcc void @Example__QuantumFunction__body(i64 3) - call fastcc void @Example__QuantumFunction__body(i64 10) + call fastcc void @Example__QuantumProgram__body(i64 3) + call fastcc void @Example__QuantumProgram__body(i64 4) ret void } -define internal fastcc void @Example__QuantumFunction__body(i64 %nQubits) unnamed_addr { +define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %x) call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) call void @__quantum__rt__qubit_release_array(%Array* %qubits) diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll new file mode 100644 index 0000000000..65703f0d8e --- /dev/null +++ b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll @@ -0,0 +1,52 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3) + call fastcc void @Example__QuantumProgram__body(i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { +entry: + %0 = mul i64 %x, %x + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll new file mode 100644 index 0000000000..420563f193 --- /dev/null +++ 
b/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll @@ -0,0 +1,80 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3, i64 2, i64 1) + call fastcc void @Example__QuantumProgram__body(i64 4, i64 9, i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x, i64 %h, i64 %g) unnamed_addr { +entry: + %.neg = xor i64 %x, -1 + %.neg1 = mul i64 %.neg, %x + %z.neg = add i64 %.neg1, 47 + %y = mul i64 %x, 3 + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, %g + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %h) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) + %4 = call fastcc i64 @Example__X__body(i64 %x) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +; Function Attrs: norecurse nounwind readnone willreturn +define internal fastcc i64 @Example__X__body(i64 %value) unnamed_addr #0 { +entry: + %0 = mul i64 %value, 3 + ret i64 %0 +} + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #2 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { norecurse nounwind readnone willreturn } +attributes #1 = { "InteropFriendly" } +attributes #2 = { "EntryPoint" } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp index 37c455961b..2a8d0eaab6 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp +++ 
b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp @@ -7,68 +7,240 @@ #include #include +#include namespace microsoft { namespace quantum { -ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run( - llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) + +bool ConstSizeArrayAnalysisAnalytics::operandsConstant(Instruction const &instruction) const { - ConstSizeArrayAnalysisAnalytics::Result result; + bool ret = true; - // Collect analytics here + // Checking that all oprands are constant + for (auto &op : instruction.operands()) + { - // Use analytics here - for (auto &basic_block : function) + auto const_arg = value_depending_on_args_.find(op) != value_depending_on_args_.end(); + auto cst = llvm::dyn_cast(op); + auto is_constant = (cst != nullptr); + + ret = ret && (const_arg || is_constant); + } + + return ret; +} + +void ConstSizeArrayAnalysisAnalytics::markPossibleConstant(Instruction &instruction) +{ + /* + // Rename constant variables + if (!instruction.hasName()) { - for (auto &instruction : basic_block) + // Naming result + char new_name[64] = {0}; + auto fmt = llvm::format("microsoft_reserved_possible_const_ret%u", tmp_counter_); + fmt.print(new_name, 64); + instruction.setName(new_name); + } + */ + + // Creating arg dependencies + ArgList all_dependencies{}; + for (auto &op : instruction.operands()) + { + auto it = value_depending_on_args_.find(op); + if (it != value_depending_on_args_.end()) { - // Skipping debug code - if (instruction.isDebugOrPseudoInst()) + for (auto &arg : it->second) { - continue; + all_dependencies.insert(arg); } + } + } - // Checking if it is a call instruction - auto *call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - continue; - } + // Adding the new name to the list + value_depending_on_args_.insert({&instruction, all_dependencies}); +} - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); +void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) +{ + // Skipping debug code + if (instruction.isDebugOrPseudoInst()) + { + return; + } - // TODO(tfr): Find a better way to inject runtime symbols - if (name != "__quantum__rt__qubit_allocate_array") - { - continue; - } + auto *call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + return; + } - // Validating that there exactly one argument - if (call_instr->arg_size() != 1) - { - continue; - } + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); - // Getting the size of the argument - auto size_value = call_instr->getArgOperand(0); - if (size_value == nullptr) - { - continue; - } + // TODO(tfr): Make use of TargetLibrary + if (name != "__quantum__rt__qubit_allocate_array") + { + return; + } + + if (call_instr->arg_size() != 1) + { + llvm::errs() << "Expected exactly one argument\n"; + return; + } + + auto argument = call_instr->getArgOperand(0); + if (argument == nullptr) + { + llvm::errs() << "Failed getting the size argument\n"; + return; + } + + // Checking named values + auto it = value_depending_on_args_.find(argument); + if (it != value_depending_on_args_.end()) + { + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + qubit_array.depends_on = it->second; + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + return; + } + + // Checking if it is a constant value + auto cst = 
llvm::dyn_cast(argument); + if (cst != nullptr) + { + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + + return; + } + + // Non-static array + QubitArray qubit_array; + qubit_array.is_possibly_static = false; + qubit_array.variable_name = instruction.getName().str(); + results_.push_back(std::move(qubit_array)); +} + +void ConstSizeArrayAnalysisAnalytics::analyseFunction(llvm::Function &function) +{ + results_.clear(); + + // Creating a list with function arguments + for (auto &arg : function.args()) + { + auto s = arg.getName().str(); + value_depending_on_args_.insert({&arg, {s}}); + } + + // Evaluating all expressions + for (auto &basic_block : function) + { + for (auto &instruction : basic_block) + { - // Checking if the value is constant - auto cst = llvm::dyn_cast(size_value); - if (cst == nullptr) + auto opcode = instruction.getOpcode(); + switch (opcode) { - continue; + case llvm::Instruction::Sub: + case llvm::Instruction::Add: + case llvm::Instruction::Mul: + case llvm::Instruction::Shl: + case llvm::Instruction::LShr: + case llvm::Instruction::AShr: + case llvm::Instruction::And: + case llvm::Instruction::Or: + case llvm::Instruction::Xor: + if (operandsConstant(instruction)) + { + markPossibleConstant(instruction); + } + break; + case llvm::Instruction::Call: + analyseCall(instruction); + break; + // Unanalysed statements + case llvm::Instruction::Ret: + case llvm::Instruction::Br: + case llvm::Instruction::Switch: + case llvm::Instruction::IndirectBr: + case llvm::Instruction::Invoke: + case llvm::Instruction::Resume: + case llvm::Instruction::Unreachable: + case llvm::Instruction::CleanupRet: + case llvm::Instruction::CatchRet: + case llvm::Instruction::CatchSwitch: + case llvm::Instruction::CallBr: + case llvm::Instruction::FNeg: + case llvm::Instruction::FAdd: + case llvm::Instruction::FSub: + case llvm::Instruction::FMul: + case llvm::Instruction::UDiv: + case llvm::Instruction::SDiv: + case llvm::Instruction::FDiv: + case llvm::Instruction::URem: + case llvm::Instruction::SRem: + case llvm::Instruction::FRem: + case llvm::Instruction::Alloca: + case llvm::Instruction::Load: + case llvm::Instruction::Store: + case llvm::Instruction::GetElementPtr: + case llvm::Instruction::Fence: + case llvm::Instruction::AtomicCmpXchg: + case llvm::Instruction::AtomicRMW: + case llvm::Instruction::Trunc: + case llvm::Instruction::ZExt: + case llvm::Instruction::SExt: + case llvm::Instruction::FPToUI: + case llvm::Instruction::FPToSI: + case llvm::Instruction::UIToFP: + case llvm::Instruction::SIToFP: + case llvm::Instruction::FPTrunc: + case llvm::Instruction::FPExt: + case llvm::Instruction::PtrToInt: + case llvm::Instruction::IntToPtr: + case llvm::Instruction::BitCast: + case llvm::Instruction::AddrSpaceCast: + case llvm::Instruction::CleanupPad: + case llvm::Instruction::CatchPad: + case llvm::Instruction::ICmp: + case llvm::Instruction::FCmp: + case llvm::Instruction::PHI: + case llvm::Instruction::Select: + case llvm::Instruction::UserOp1: + case llvm::Instruction::UserOp2: + case llvm::Instruction::VAArg: + case llvm::Instruction::ExtractElement: + case llvm::Instruction::InsertElement: + case llvm::Instruction::ShuffleVector: + case llvm::Instruction::ExtractValue: + case llvm::Instruction::InsertValue: + case llvm::Instruction::LandingPad: + // End of Binary Ops + default: + break; } - - result[name] = 
cst->getValue().getSExtValue(); } } +} + +ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run( + llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) +{ + analyseFunction(function); - return result; + return results_; } ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream &out_stream) @@ -85,9 +257,14 @@ llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & out_stream_ << function.getName() << "\n"; out_stream_ << "====================" << "\n\n"; - for (auto const &size_info : results) + for (auto const &ret : results) { - out_stream_ << size_info.first() << ": " << size_info.second << "\n"; + out_stream_ << ret.variable_name << (ret.is_possibly_static ? ": " : "!"); + for (auto &x : ret.depends_on) + { + out_stream_ << x << ", "; + } + out_stream_ << "\n"; } } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp index b30abcf9b9..5e2c1b230e 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp +++ b/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp @@ -4,6 +4,9 @@ #include "Llvm.hpp" +#include +#include + namespace microsoft { namespace quantum { @@ -11,7 +14,24 @@ class ConstSizeArrayAnalysisAnalytics : public llvm::AnalysisInfoMixin { public: - using Result = llvm::StringMap; ///< Change the type of the collected date here + using String = std::string; + using ArgList = std::unordered_set; + + struct QubitArray + { + bool is_possibly_static{false}; ///< Indicates whether the array is possibly static or not + String variable_name{}; ///< Name of the qubit array + ArgList depends_on{}; ///< Function arguments that determines if it is constant or not + }; + + using Value = llvm::Value; + using DependencyGraph = std::unordered_map; + using ValueDependencyGraph = std::unordered_map; + + using Instruction = llvm::Instruction; + using Function = llvm::Function; + using QubitArrayList = std::vector; + using Result = QubitArrayList; /// Constructors and destructors /// @{ @@ -32,9 +52,31 @@ class ConstSizeArrayAnalysisAnalytics Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); /// @} + /// Function analysis + /// @{ + void analyseFunction(llvm::Function &function); + /// @} + + /// Instruction analysis + /// @{ + bool operandsConstant(Instruction const &instruction) const; + void markPossibleConstant(Instruction &instruction); + void analyseCall(Instruction &instruction); + /// @} + private: static llvm::AnalysisKey Key; // NOLINT friend struct llvm::AnalysisInfoMixin; + + /// Analysis details + /// @{ + ValueDependencyGraph value_depending_on_args_{}; + /// @} + + /// Result + /// @{ + QubitArrayList results_{}; + /// @} }; class ConstSizeArrayAnalysisPrinter : public llvm::PassInfoMixin From bca2646b3d79ed83af9fa7bc42684142ab303c04 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 27 Jul 2021 12:56:43 +0200 Subject: [PATCH 37/48] Refactoring pass --- src/Passes/examples/ConstSizeArrayAnalysis/Makefile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile index 4050079dfe..7db0c3e199 100644 --- a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile +++ b/src/Passes/examples/ConstSizeArrayAnalysis/Makefile @@ -1,7 +1,6 @@ run: build -# opt -load-pass-plugin ../../Debug/libs/libMetaData.dylib --passes="meta-data" -S analysis-problem-4.ll - opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem-4.ll + opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-problem-4.ll build: - pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make ConstSizeArrayAnalysis && popd || popd - pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make MetaData && popd || popd + pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make QubitAllocationAnalysis && popd || popd + From 75c22f6e0dfd65b92a4995276bd8a4173135d3d4 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 12:58:12 +0200 Subject: [PATCH 38/48] More refactoring --- .../ConstSizeArray/Comparison.cpp | 0 .../ConstSizeArray/ConstSizeArray.csproj | 0 .../ConstSizeArray/ConstSizeArray.qs | 0 .../ConstSizeArray/Makefile | 0 .../ConstSizeArray/comparison.ll | 0 .../Makefile | 0 .../README.md | 0 .../analysis-problem-1.ll | 0 .../analysis-problem-2.ll | 0 .../analysis-problem-3.ll | 0 .../analysis-problem-4.ll | 0 .../LibQubitAllocationAnalysis.cpp} | 16 ++-- .../QubitAllocationAnalysis.cpp} | 86 +++++++++++-------- .../QubitAllocationAnalysis.hpp} | 45 +++++----- .../SPECIFICATION.md | 0 15 files changed, 78 insertions(+), 69 deletions(-) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/Comparison.cpp (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/ConstSizeArray.csproj (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/ConstSizeArray.qs (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/Makefile (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/ConstSizeArray/comparison.ll (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/Makefile (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/README.md (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/analysis-problem-1.ll (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/analysis-problem-2.ll (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/analysis-problem-3.ll (100%) rename src/Passes/examples/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/analysis-problem-4.ll (100%) rename src/Passes/libs/{ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp => QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp} (63%) rename src/Passes/libs/{ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp => QubitAllocationAnalysis/QubitAllocationAnalysis.cpp} (69%) rename src/Passes/libs/{ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp => QubitAllocationAnalysis/QubitAllocationAnalysis.hpp} (52%) 
rename src/Passes/libs/{ConstSizeArrayAnalysis => QubitAllocationAnalysis}/SPECIFICATION.md (100%) diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Comparison.cpp similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Comparison.cpp rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Comparison.cpp diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.csproj rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.csproj diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/ConstSizeArray.qs rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/Makefile rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/ConstSizeArray/comparison.ll rename to src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/Makefile rename to src/Passes/examples/QubitAllocationAnalysis/Makefile diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/README.md b/src/Passes/examples/QubitAllocationAnalysis/README.md similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/README.md rename to src/Passes/examples/QubitAllocationAnalysis/README.md diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-1.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-1.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-2.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll similarity index 100% rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-3.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll diff --git a/src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll similarity index 100% 
rename from src/Passes/examples/ConstSizeArrayAnalysis/analysis-problem-4.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp similarity index 63% rename from src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp rename to src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp index 91d766c9b8..358514329b 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/LibConstSizeArrayAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp @@ -2,26 +2,26 @@ // Licensed under the MIT License. #include "Llvm.hpp" -#include "ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp" +#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include #include namespace { // Interface to plugin -llvm::PassPluginLibraryInfo getConstSizeArrayAnalysisPluginInfo() +llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() { using namespace microsoft::quantum; using namespace llvm; return { - LLVM_PLUGIN_API_VERSION, "ConstSizeArrayAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { + LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { // Registering a printer for the anaylsis pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, ArrayRef /*unused*/) { - if (name == "print") + if (name == "print") { - fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); return true; } return false; @@ -29,12 +29,12 @@ llvm::PassPluginLibraryInfo getConstSizeArrayAnalysisPluginInfo() pb.registerVectorizerStartEPCallback( [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(ConstSizeArrayAnalysisPrinter(llvm::errs())); + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); }); // Registering the analysis module pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - fam.registerPass([] { return ConstSizeArrayAnalysisAnalytics(); }); + fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); }); }}; } @@ -43,5 +43,5 @@ llvm::PassPluginLibraryInfo getConstSizeArrayAnalysisPluginInfo() extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getConstSizeArrayAnalysisPluginInfo(); + return getQubitAllocationAnalysisPluginInfo(); } diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp similarity index 69% rename from src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp rename to src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 2a8d0eaab6..12dfce428d 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -1,9 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
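Before the analysis implementation continues below, a note on the registration pattern shown in `LibQubitAllocationAnalysis.cpp` above: the plugin exposes the analysis through `registerAnalysisRegistrationCallback` and its printer through `registerPipelineParsingCallback`, so that `opt` can run the printer by name. Some template arguments (for example the element type of the `ArrayRef` in the pipeline-parsing callback) did not survive this rendering of the patch, so here is a minimal, self-contained sketch of the same pattern with the types written out. The analysis `MyAnalysis`, its printer, and the pipeline name `print-my-analysis` are illustrative placeholders, not identifiers from this repository.

```cpp
// Minimal new-pass-manager plugin sketch; all names are illustrative only.
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Support/raw_ostream.h"

#include <cstddef>

namespace {

// A toy function analysis: its "result" is just the number of basic blocks.
struct MyAnalysis : llvm::AnalysisInfoMixin<MyAnalysis>
{
  using Result = std::size_t;
  Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/)
  {
    return function.size();
  }

  static llvm::AnalysisKey Key;  // NOLINT
};
llvm::AnalysisKey MyAnalysis::Key;

// A printer pass that asks the analysis manager for the result above.
struct MyAnalysisPrinter : llvm::PassInfoMixin<MyAnalysisPrinter>
{
  llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam)
  {
    auto result = fam.getResult<MyAnalysis>(function);
    llvm::errs() << function.getName() << ": " << result << " basic block(s)\n";
    return llvm::PreservedAnalyses::all();
  }
};

}  // namespace

extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo()
{
  using namespace llvm;
  return {LLVM_PLUGIN_API_VERSION, "MyAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) {
            // Map the pipeline name "print-my-analysis" to the printer pass.
            pb.registerPipelineParsingCallback(
                [](StringRef name, FunctionPassManager &fpm,
                   ArrayRef<PassBuilder::PipelineElement> /*unused*/) {
                  if (name == "print-my-analysis")
                  {
                    fpm.addPass(MyAnalysisPrinter());
                    return true;
                  }
                  return false;
                });

            // Make the analysis itself available to any pass that asks for it.
            pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) {
              fam.registerPass([] { return MyAnalysis(); });
            });
          }};
}
```

Built as a shared library, such a plugin would be driven the same way as the pass in this patch, for example `opt -load-pass-plugin ./libMyAnalysis.so --passes=print-my-analysis -disable-output input.ll`, where the library name is again only an example.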
-#include "ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp" - #include "Llvm.hpp" +#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include #include @@ -12,15 +11,22 @@ namespace microsoft { namespace quantum { -bool ConstSizeArrayAnalysisAnalytics::operandsConstant(Instruction const &instruction) const +bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const &instruction) const { + // Default is true (i.e. the case of no operands) bool ret = true; // Checking that all oprands are constant for (auto &op : instruction.operands()) { - auto const_arg = value_depending_on_args_.find(op) != value_depending_on_args_.end(); + // An operand is constant if its value was previously generated from + // a const expression ... + auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); + + // ... or if it is just a compile time constant. Note that we + // delibrately only consider integers. We may expand this + // to other constants once we have function support. auto cst = llvm::dyn_cast(op); auto is_constant = (cst != nullptr); @@ -30,27 +36,18 @@ bool ConstSizeArrayAnalysisAnalytics::operandsConstant(Instruction const &instru return ret; } -void ConstSizeArrayAnalysisAnalytics::markPossibleConstant(Instruction &instruction) +void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction &instruction) { - /* - // Rename constant variables - if (!instruction.hasName()) - { - // Naming result - char new_name[64] = {0}; - auto fmt = llvm::format("microsoft_reserved_possible_const_ret%u", tmp_counter_); - fmt.print(new_name, 64); - instruction.setName(new_name); - } - */ - // Creating arg dependencies ArgList all_dependencies{}; for (auto &op : instruction.operands()) { - auto it = value_depending_on_args_.find(op); - if (it != value_depending_on_args_.end()) + // If the operand has dependecies ... + auto it = constantness_dependencies_.find(op); + if (it != constantness_dependencies_.end()) { + // ... we add these as a dependency for the + // resulting instructions value for (auto &arg : it->second) { all_dependencies.insert(arg); @@ -58,11 +55,11 @@ void ConstSizeArrayAnalysisAnalytics::markPossibleConstant(Instruction &instruct } } - // Adding the new name to the list - value_depending_on_args_.insert({&instruction, all_dependencies}); + // Adding full list of dependices to the dependency graph + constantness_dependencies_.insert({&instruction, all_dependencies}); } -void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) +void QubitAllocationAnalysisAnalytics::analyseCall(Instruction &instruction) { // Skipping debug code if (instruction.isDebugOrPseudoInst()) @@ -70,27 +67,32 @@ void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) return; } + // Recovering the call information auto *call_instr = llvm::dyn_cast(&instruction); if (call_instr == nullptr) { return; } + // Getting the name of the function being called auto target_function = call_instr->getCalledFunction(); auto name = target_function->getName(); - // TODO(tfr): Make use of TargetLibrary + // TODO(tfr): Make use of TargetLibraryInfo if (name != "__quantum__rt__qubit_allocate_array") { return; } + // We expect only a single argument with the number + // of qubits allocated if (call_instr->arg_size() != 1) { llvm::errs() << "Expected exactly one argument\n"; return; } + // Next we extract the argument ... 
auto argument = call_instr->getArgOperand(0); if (argument == nullptr) { @@ -98,10 +100,12 @@ void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) return; } - // Checking named values - auto it = value_depending_on_args_.find(argument); - if (it != value_depending_on_args_.end()) + // ... and checks whether it is a result of a dependant + // const expression + auto it = constantness_dependencies_.find(argument); + if (it != constantness_dependencies_.end()) { + // If it is, we add the details to the result list QubitArray qubit_array; qubit_array.is_possibly_static = true; qubit_array.variable_name = instruction.getName().str(); @@ -112,7 +116,8 @@ void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) return; } - // Checking if it is a constant value + // Otherwise, it may be a static allocation based on a constant (or + // folded constant) auto cst = llvm::dyn_cast(argument); if (cst != nullptr) { @@ -126,22 +131,26 @@ void ConstSizeArrayAnalysisAnalytics::analyseCall(Instruction &instruction) return; } - // Non-static array + // If neither of the previous is the case, we are dealing with a non-static array QubitArray qubit_array; qubit_array.is_possibly_static = false; qubit_array.variable_name = instruction.getName().str(); + + // Storing the result results_.push_back(std::move(qubit_array)); } -void ConstSizeArrayAnalysisAnalytics::analyseFunction(llvm::Function &function) +void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function &function) { + // Clearing results generated in a previous run results_.clear(); + constantness_dependencies_.clear(); // Creating a list with function arguments for (auto &arg : function.args()) { auto s = arg.getName().str(); - value_depending_on_args_.insert({&arg, {s}}); + constantness_dependencies_.insert({&arg, {s}}); } // Evaluating all expressions @@ -149,7 +158,6 @@ void ConstSizeArrayAnalysisAnalytics::analyseFunction(llvm::Function &function) { for (auto &instruction : basic_block) { - auto opcode = instruction.getOpcode(); switch (opcode) { @@ -235,22 +243,24 @@ void ConstSizeArrayAnalysisAnalytics::analyseFunction(llvm::Function &function) } } -ConstSizeArrayAnalysisAnalytics::Result ConstSizeArrayAnalysisAnalytics::run( +QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) { + // Running functin analysis analyseFunction(function); + // ... and return the result. 
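The comments added through this hunk describe the heart of the analysis: function arguments seed a dependency map, an arithmetic result whose operands are all compile-time constants or already-tracked values inherits the union of its operands' argument dependencies, and each `__quantum__rt__qubit_allocate_array` call is then classified from that map. To make the data flow explicit, here is the same propagation restated outside LLVM on a toy expression type; the `Value` struct, the hard-coded expression `x * x + 3`, and the printed verdict are illustrative assumptions, not code from the pass.

```cpp
// Sketch of the constantness-dependency propagation only; not the pass itself.
#include <iostream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

struct Value
{
  bool                       is_literal;  // compile-time constant?
  std::string                name;        // set for function arguments
  std::vector<Value const *> operands;    // empty for arguments and literals
};

using ArgSet = std::unordered_set<std::string>;

int main()
{
  // Models "use qubits = Qubit[x * x + 3];" for a function argument x.
  Value x{false, "x", {}};
  Value three{true, "", {}};
  Value mul{false, "mul", {&x, &x}};
  Value size{false, "size", {&mul, &three}};

  std::unordered_map<Value const *, ArgSet> deps;
  deps[&x] = {"x"};  // arguments seed the dependency map

  for (Value const *v : {&mul, &size})
  {
    ArgSet all_dependencies;
    bool   possibly_constant = true;
    for (Value const *op : v->operands)
    {
      auto it = deps.find(op);
      if (it != deps.end())
      {
        // Tracked operand: inherit its argument dependencies.
        all_dependencies.insert(it->second.begin(), it->second.end());
      }
      else if (!op->is_literal)
      {
        // Neither tracked nor a literal: the result cannot be proven constant.
        possibly_constant = false;
      }
    }
    if (possibly_constant)
    {
      deps[v] = all_dependencies;
    }
  }

  // Classify the allocation size the same way the printer reports it.
  auto it = deps.find(&size);
  if (it == deps.end())
  {
    std::cout << "qubits is dynamic.\n";
  }
  else if (it->second.empty())
  {
    std::cout << "qubits is trivially static.\n";
  }
  else
  {
    std::cout << "qubits depends on ";
    for (std::string const &arg : it->second)
    {
      std::cout << arg << " ";
    }
    std::cout << "being constant to be static.\n";
  }
  return 0;
}
```

Compiled and run, this prints `qubits depends on x being constant to be static.`, which matches the style of verdict the printer in this patch emits for an allocation whose size is an arithmetic function of a single argument.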
return results_; } -ConstSizeArrayAnalysisPrinter::ConstSizeArrayAnalysisPrinter(llvm::raw_ostream &out_stream) +QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream) : out_stream_(out_stream) {} -llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) +llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) { - auto &results = fam.getResult(function); + auto &results = fam.getResult(function); if (!results.empty()) { @@ -271,12 +281,12 @@ llvm::PreservedAnalyses ConstSizeArrayAnalysisPrinter::run(llvm::Function & return llvm::PreservedAnalyses::all(); } -bool ConstSizeArrayAnalysisPrinter::isRequired() +bool QubitAllocationAnalysisPrinter::isRequired() { return true; } -llvm::AnalysisKey ConstSizeArrayAnalysisAnalytics::Key; +llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; } // namespace quantum } // namespace microsoft diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp similarity index 52% rename from src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp rename to src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index 5e2c1b230e..865e2f5fbb 100644 --- a/src/Passes/libs/ConstSizeArrayAnalysis/ConstSizeArrayAnalysis.hpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -10,8 +10,8 @@ namespace microsoft { namespace quantum { -class ConstSizeArrayAnalysisAnalytics - : public llvm::AnalysisInfoMixin +class QubitAllocationAnalysisAnalytics + : public llvm::AnalysisInfoMixin { public: using String = std::string; @@ -28,23 +28,22 @@ class ConstSizeArrayAnalysisAnalytics using DependencyGraph = std::unordered_map; using ValueDependencyGraph = std::unordered_map; - using Instruction = llvm::Instruction; - using Function = llvm::Function; - using QubitArrayList = std::vector; - using Result = QubitArrayList; + using Instruction = llvm::Instruction; + using Function = llvm::Function; + using Result = std::vector; /// Constructors and destructors /// @{ - ConstSizeArrayAnalysisAnalytics() = default; - ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics const &) = delete; - ConstSizeArrayAnalysisAnalytics(ConstSizeArrayAnalysisAnalytics &&) = default; - ~ConstSizeArrayAnalysisAnalytics() = default; + QubitAllocationAnalysisAnalytics() = default; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const &) = delete; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics &&) = default; + ~QubitAllocationAnalysisAnalytics() = default; /// @} /// Operators /// @{ - ConstSizeArrayAnalysisAnalytics &operator=(ConstSizeArrayAnalysisAnalytics const &) = delete; - ConstSizeArrayAnalysisAnalytics &operator=(ConstSizeArrayAnalysisAnalytics &&) = delete; + QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics const &) = delete; + QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics &&) = delete; /// @} /// Functions required by LLVM @@ -66,35 +65,35 @@ class ConstSizeArrayAnalysisAnalytics private: static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; + friend struct llvm::AnalysisInfoMixin; /// Analysis details /// @{ - ValueDependencyGraph value_depending_on_args_{}; + ValueDependencyGraph constantness_dependencies_{}; /// @} /// Result /// @{ - QubitArrayList 
results_{}; + Result results_{}; /// @} }; -class ConstSizeArrayAnalysisPrinter : public llvm::PassInfoMixin +class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin { public: /// Constructors and destructors /// @{ - explicit ConstSizeArrayAnalysisPrinter(llvm::raw_ostream &out_stream); - ConstSizeArrayAnalysisPrinter() = delete; - ConstSizeArrayAnalysisPrinter(ConstSizeArrayAnalysisPrinter const &) = delete; - ConstSizeArrayAnalysisPrinter(ConstSizeArrayAnalysisPrinter &&) = default; - ~ConstSizeArrayAnalysisPrinter() = default; + explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream); + QubitAllocationAnalysisPrinter() = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const &) = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter &&) = default; + ~QubitAllocationAnalysisPrinter() = default; /// @} /// Operators /// @{ - ConstSizeArrayAnalysisPrinter &operator=(ConstSizeArrayAnalysisPrinter const &) = delete; - ConstSizeArrayAnalysisPrinter &operator=(ConstSizeArrayAnalysisPrinter &&) = delete; + QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter const &) = delete; + QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter &&) = delete; /// @} /// Functions required by LLVM diff --git a/src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md b/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md similarity index 100% rename from src/Passes/libs/ConstSizeArrayAnalysis/SPECIFICATION.md rename to src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md From ab7ddf64d19f7843d5d5b3bd52fcf74a7e85b39e Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 13:42:44 +0200 Subject: [PATCH 39/48] Adding documenation --- .../ConstSizeArray/ConstSizeArray.qs | 2 +- .../ConstSizeArray/Makefile | 6 + .../ConstSizeArray/comparison.ll | 75 ------ .../examples/QubitAllocationAnalysis/Makefile | 11 +- .../QubitAllocationAnalysis/README.md | 235 +++++++++++------- ...lysis-problem-4.ll => analysis-example.ll} | 4 + .../analysis-problem-1.ll | 50 ---- .../analysis-problem-2.ll | 51 ---- .../analysis-problem-3.ll | 52 ---- .../QubitAllocationAnalysis.cpp | 33 ++- .../QubitAllocationAnalysis.hpp | 11 +- .../QubitAllocationAnalysis/SPECIFICATION.md | 9 + 12 files changed, 208 insertions(+), 331 deletions(-) delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll rename src/Passes/examples/QubitAllocationAnalysis/{analysis-problem-4.ll => analysis-example.ll} (91%) delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index ae895648ed..c8c78d1aea 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -1,5 +1,4 @@ namespace Example { - @EntryPoint() operation Main() : Int { @@ -17,6 +16,7 @@ namespace Example { let z = x * (x + 1) - 47; let y = 3 * x; + use qubits0 = Qubit[9]; use qubits1 = Qubit[(y - 2)/2-z]; use qubits2 = Qubit[y - g]; use qubits3 = Qubit[h]; diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile 
b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile index fa97ab5b45..59399d367e 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/Makefile @@ -1,5 +1,11 @@ +analysis-example.ll: + dotnet build ConstSizeArray.csproj + opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll + make clean + comparison: clang++ -S -emit-llvm -std=c++14 -stdlib=libc++ Comparison.cpp -o comparison.ll + clean: rm -rf bin rm -rf obj diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll deleted file mode 100644 index 0e2c5308a3..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/comparison.ll +++ /dev/null @@ -1,75 +0,0 @@ -; ModuleID = 'Comparison.cpp' -source_filename = "Comparison.cpp" -target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" -target triple = "x86_64-apple-macosx11.0.0" - -; Function Attrs: noinline nounwind optnone ssp uwtable mustprogress -define dso_local void @_Z15QuantumFunctioni(i32 %0) #0 { - %2 = alloca i32, align 4 - %3 = alloca i64, align 8 - %4 = alloca i64, align 8 - %5 = alloca i8*, align 8 - %6 = alloca i64, align 8 - store i32 %0, i32* %2, align 4 - store volatile i64 3, i64* %3, align 8 - store i64 0, i64* %4, align 8 - br label %7 - -7: ; preds = %15, %1 - %8 = load i64, i64* %4, align 8 - %9 = load volatile i64, i64* %3, align 8 - %10 = icmp ult i64 %8, %9 - br i1 %10, label %11, label %18 - -11: ; preds = %7 - %12 = load i32, i32* %2, align 4 - %13 = load i32, i32* %2, align 4 - %14 = add nsw i32 %13, %12 - store i32 %14, i32* %2, align 4 - br label %15 - -15: ; preds = %11 - %16 = load i64, i64* %4, align 8 - %17 = add i64 %16, 1 - store i64 %17, i64* %4, align 8 - br label %7, !llvm.loop !3 - -18: ; preds = %7 - %19 = load i32, i32* %2, align 4 - %20 = zext i32 %19 to i64 - %21 = call i8* @llvm.stacksave() - store i8* %21, i8** %5, align 8 - %22 = alloca i32, i64 %20, align 16 - store i64 %20, i64* %6, align 8 - %23 = load i8*, i8** %5, align 8 - call void @llvm.stackrestore(i8* %23) - ret void -} - -; Function Attrs: nofree nosync nounwind willreturn -declare i8* @llvm.stacksave() #1 - -; Function Attrs: nofree nosync nounwind willreturn -declare void @llvm.stackrestore(i8*) #1 - -; Function Attrs: noinline norecurse nounwind optnone ssp uwtable mustprogress -define dso_local i32 @main() #2 { - %1 = alloca i32, align 4 - store i32 0, i32* %1, align 4 - call void @_Z15QuantumFunctioni(i32 10) - call void @_Z15QuantumFunctioni(i32 3) - ret i32 0 -} - -attributes #0 = { noinline nounwind optnone ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nofree nosync nounwind willreturn } -attributes #2 = { noinline norecurse nounwind optnone ssp uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" 
"no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } - -!llvm.module.flags = !{!0, !1} -!llvm.ident = !{!2} - -!0 = !{i32 1, !"wchar_size", i32 4} -!1 = !{i32 7, !"PIC Level", i32 2} -!2 = !{!"Homebrew clang version 12.0.1"} -!3 = distinct !{!3, !4} -!4 = !{!"llvm.loop.mustprogress"} diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index 7db0c3e199..dd808878eb 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -1,6 +1,13 @@ -run: build - opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-problem-4.ll +run: build analysis-example.ll + opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll build: pushd ../../ && mkdir -p Debug && cd Debug && cmake .. && make QubitAllocationAnalysis && popd || popd + +analysis-example.ll: + cd ConstSizeArray && make analysis-example.ll + +clean: + cd ConstSizeArray && make clean + rm analysis-example.ll \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/README.md b/src/Passes/examples/QubitAllocationAnalysis/README.md index fba3828ece..383ee026ed 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/README.md +++ b/src/Passes/examples/QubitAllocationAnalysis/README.md @@ -1,160 +1,209 @@ -# ConstSizeArray +# QubitAllocationAnalysis -## Running the analysis +## Quick start -Ensure that you have build the latest version of the pass +The following depnds on: + +- A working LLVM installation, including paths correctly setup +- CMake +- C#, Q# and the .NET framework + +Running following command ```sh -% make build +% make run ``` +will first build the pass, then build the QIR using Q# following by removing the noise using `opt` with optimisation level 1. Finally, it will execute the analysis pass and should provide you with information about qubit allocation in the Q# program defined in `ConstSizeArray/ConstSizeArray.qs`. + +## Detailed run + +From the Passes root (two levels up from this directory), make a new build + ```sh -% opt -load-pass-plugin ../../Debug/libs/libConstSizeArrayAnalysis.dylib --passes="print" -disable-output analysis-problem.ll +% mkdir Debug +% cd Debug +% cmake .. ``` -## Cases to consider +and then compile the `QubitAllocationAnalysis`: -```qsharp -namespace Example { +```sh +% make QubitAllocationAnalysis +``` - @EntryPoint() - operation Main() : Int - { - QuantumProgram(); - return 0; - } +Next return `examples/QubitAllocationAnalysis` and enter the directory `ConstSizeArray` to build the QIR: +```sh +% make analysis-example.ll +``` +or execute the commands manually, - operation QuantumProgram(x: Int) : Unit { - use qubits = Qubit[3]; - } +```sh +% dotnet build ConstSizeArray.csproj +% opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll +% make clean +``` -} +Returning to `examples/QubitAllocationAnalysis`, the pass can now be ran by executing: + +```sh +% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll ``` +## Example cases + +Below we will consider a few different examples. 
You can run them by updating the code in `ConstSizeArray/ConstSizeArray.qs` and executing `make run` from the `examples/QubitAllocationAnalysis` folder subsequently. You will need to delete `analysis-example.ll` between runs. + +### Trivially constant + +This is the simplest example we can think of: + ```qsharp namespace Example { - @EntryPoint() - operation Main() : Int - { - QuantumProgram(3); - QuantumProgram(4); - return 0; + operation QuantumProgram() : Unit { + use qubits = Qubit[3]; } +} +``` +The corresponding QIR is: +``` +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" - operation QuantumProgram(x: Int) : Unit { - use qubits = Qubit[x]; - } +%Array = type opaque +define internal fastcc void @Example__QuantumProgram__body() unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void } + +; (...) ``` -```qsharp -namespace Example { +Running the pass procudes following output: - @EntryPoint() - operation Main() : Int - { - QuantumProgram(3); - QuantumProgram(4); - return 0; - } +``` +% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +Example__QuantumProgram__body +==================== +qubits is trivially static with 3 qubits. +``` - operation QuantumProgram(x: Int) : Unit { - use qubits = Qubit[x * x]; - } +### Dependency case -} -``` +In some cases, a qubit array will be compile time constant in size if the function arguments +provided are compile-time constants. One example of this is: -```qsharp +``` namespace Example { - @EntryPoint() operation Main() : Int { - QuantumProgram(ComputeNumberOfQubits); + QuantumProgram(3); + QuantumProgram(4); return 0; } - function ComputeNumberOfQubits(x: Int): Int { - return x * x; + operation QuantumProgram(x: Int) : Unit { + use qubits = Qubit[x]; } +} +``` - operation QuantumProgram(fnc : Int -> Int) : Unit { - use qubits = Qubit[fnc(3)]; - } +The corresponding QIR is -} ``` +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" -## Generating an example QIR +%Array = type opaque +%String = type opaque -Build the QIR +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3) + call fastcc void @Example__QuantumProgram__body(i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %x) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} +; ( ... 
) -```sh -cd ConstSizeArray -dotnet build ConstSizeArray.csproj ``` -Strip it of unecessary information +The analyser returns following output: -```sh -opt -S qir/ConstSizeArray.ll -O1 > qir/Problem.ll ``` +% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll -Result should be similar to +Example__QuantumProgram__body +==================== -``` -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" +qubits depends on x being constant to be static. -%Array = type opaque -%String = type opaque +``` -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr +### Summary case -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr +Finally, we do a summary case that demonstrates some of the more elaborate cases: -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr +``` +namespace Example { + @EntryPoint() + operation Main() : Int + { + QuantumProgram(3,2,1); + QuantumProgram(4,9,4); + return 0; + } -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + function X(value: Int): Int + { + return 3 * value; + } -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - %qubits.i.i = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 10) - tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 1) - tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 -1) - tail call void @__quantum__rt__qubit_release_array(%Array* %qubits.i.i) - ret i64 0 -} + operation QuantumProgram(x: Int, h: Int, g: Int) : Unit { + let z = x * (x + 1) - 47; + let y = 3 * x; -define void @Example__Main() local_unnamed_addr #1 { -entry: - %qubits.i.i = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 10) - tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 1) - tail call void @__quantum__rt__array_update_alias_count(%Array* %qubits.i.i, i32 -1) - tail call void @__quantum__rt__qubit_release_array(%Array* %qubits.i.i) - %0 = tail call %String* @__quantum__rt__int_to_string(i64 0) - tail call void @__quantum__rt__message(%String* %0) - tail call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void + use qubits0 = Qubit[9]; + use qubits1 = Qubit[(y - 2)/2-z]; + use qubits2 = Qubit[y - g]; + use qubits3 = Qubit[h]; + use qubits4 = Qubit[X(x)]; + } } +``` -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr +We will omit the QIR in the documenation as it is a long. The output of the anaysis is: -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } ``` +% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll -# Notes +Example__QuantumProgram__body +==================== -To make a proper version of Const Size deduction, look at the [constant folding](https://llvm.org/doxygen/ConstantFolding_8cpp_source.html) implementation and in particular, the [target library](https://llvm.org/doxygen/classllvm_1_1TargetLibraryInfo.html) which essentially promotes information about the runtime. +qubits0 is trivially static with 9 qubits. +qubits1 depends on x being constant to be static. +qubits2 depends on x, g being constant to be static. +qubits3 depends on h being constant to be static. +qubits4 is dynamic. 
+``` diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll similarity index 91% rename from src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll rename to src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll index 420563f193..b87010d9d2 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-4.ll +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -17,6 +17,8 @@ entry: %.neg1 = mul i64 %.neg, %x %z.neg = add i64 %.neg1, 47 %y = mul i64 %x, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) %0 = add i64 %y, -2 %1 = lshr i64 %0, 1 %2 = add i64 %z.neg, %1 @@ -38,6 +40,8 @@ entry: call void @__quantum__rt__qubit_release_array(%Array* %qubits2) call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) ret void } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll deleted file mode 100644 index 01315ee268..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-1.ll +++ /dev/null @@ -1,50 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumFunction__body() - ret void -} - -define internal fastcc void @Example__QuantumFunction__body() unnamed_addr { -entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 10) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll deleted file mode 100644 index ea4ead0400..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-2.ll +++ /dev/null @@ -1,51 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - 
-%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumProgram__body(i64 3) - call fastcc void @Example__QuantumProgram__body(i64 4) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { -entry: - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %x) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll deleted file mode 100644 index 65703f0d8e..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/analysis-problem-3.ll +++ /dev/null @@ -1,52 +0,0 @@ -; ModuleID = 'qir/ConstSizeArray.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumProgram__body(i64 3) - call fastcc void @Example__QuantumProgram__body(i64 4) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { -entry: - %0 = mul i64 %x, %x - %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %0) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #0 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - 
-declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -attributes #0 = { "InteropFriendly" } -attributes #1 = { "EntryPoint" } diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 12dfce428d..0da8007336 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -1,9 +1,10 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "Llvm.hpp" #include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" +#include "Llvm.hpp" + #include #include #include @@ -124,6 +125,7 @@ void QubitAllocationAnalysisAnalytics::analyseCall(Instruction &instruction) QubitArray qubit_array; qubit_array.is_possibly_static = true; qubit_array.variable_name = instruction.getName().str(); + qubit_array.size = cst->getZExtValue(); // Pushing to the result results_.push_back(std::move(qubit_array)); @@ -269,11 +271,34 @@ llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & << "\n\n"; for (auto const &ret : results) { - out_stream_ << ret.variable_name << (ret.is_possibly_static ? ": " : "!"); - for (auto &x : ret.depends_on) + if (!ret.is_possibly_static) + { + out_stream_ << ret.variable_name << " is dynamic.\n"; + } + else { - out_stream_ << x << ", "; + if (ret.depends_on.empty()) + { + out_stream_ << ret.variable_name << " is trivially static with " << ret.size + << " qubits."; + } + else + { + out_stream_ << ret.variable_name << " depends on "; + bool first = true; + for (auto &x : ret.depends_on) + { + if (!first) + { + out_stream_ << ", "; + } + out_stream_ << x; + first = false; + } + out_stream_ << " being constant to be static."; + } } + out_stream_ << "\n"; } } diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index 865e2f5fbb..a3ca7382b2 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -19,9 +19,14 @@ class QubitAllocationAnalysisAnalytics struct QubitArray { - bool is_possibly_static{false}; ///< Indicates whether the array is possibly static or not - String variable_name{}; ///< Name of the qubit array - ArgList depends_on{}; ///< Function arguments that determines if it is constant or not + bool is_possibly_static{false}; ///< Indicates whether the array is + /// possibly static or not + /// + String variable_name{}; ///< Name of the qubit array + ArgList depends_on{}; ///< Function arguments that + /// determines if it is constant or not + /// + uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. }; using Value = llvm::Value; diff --git a/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md b/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md index e69de29bb2..4c2781d605 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md +++ b/src/Passes/libs/QubitAllocationAnalysis/SPECIFICATION.md @@ -0,0 +1,9 @@ +# Qubit Allocation Analysis + +## Purpose + +The purpose of this pass is to analyse the code for qubit allocations and identify +the allocation dependency. This helps subsequent transfomation passes expand the code +to, for instance, eliminate loops and classical logic. 
This is desirable as the control +logic for some quantum computing systems may be limited and one may therefore wish +to reduce its complexity as much as possible at compile time. From 0ae8d26adc40db982923ffd9397137825ea42665 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 13:44:57 +0200 Subject: [PATCH 40/48] CI and style --- .../LibQubitAllocationAnalysis.cpp | 62 +- .../QubitAllocationAnalysis.cpp | 560 +++++++++--------- .../QubitAllocationAnalysis.hpp | 207 +++---- 3 files changed, 419 insertions(+), 410 deletions(-) diff --git a/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp index 358514329b..ac03bc1f41 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/LibQubitAllocationAnalysis.cpp @@ -2,46 +2,50 @@ // Licensed under the MIT License. #include "Llvm.hpp" + #include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include #include -namespace { +namespace +{ // Interface to plugin llvm::PassPluginLibraryInfo getQubitAllocationAnalysisPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering a printer for the anaylsis - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "print") - { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); - return true; - } - return false; - }); - - pb.registerVectorizerStartEPCallback( - [](llvm::FunctionPassManager &fpm, llvm::PassBuilder::OptimizationLevel /*level*/) { - fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); - }); - - // Registering the analysis module - pb.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &fam) { - fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); - }); - }}; + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "QubitAllocationAnalysis", LLVM_VERSION_STRING, + [](PassBuilder& pb) + { + // Registering a printer for the anaylsis + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) + { + if (name == "print") + { + fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); + return true; + } + return false; + }); + + pb.registerVectorizerStartEPCallback( + [](llvm::FunctionPassManager& fpm, llvm::PassBuilder::OptimizationLevel /*level*/) + { fpm.addPass(QubitAllocationAnalysisPrinter(llvm::errs())); }); + + // Registering the analysis module + pb.registerAnalysisRegistrationCallback( + [](FunctionAnalysisManager& fam) + { fam.registerPass([] { return QubitAllocationAnalysisAnalytics(); }); }); + }}; } -} // namespace +} // namespace extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getQubitAllocationAnalysisPluginInfo(); + return getQubitAllocationAnalysisPluginInfo(); } diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp index 0da8007336..60ef6aaeb7 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.cpp @@ -1,317 +1,321 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" - #include "Llvm.hpp" +#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" + #include #include #include -namespace microsoft { -namespace quantum { - -bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const &instruction) const +namespace microsoft +{ +namespace quantum { - // Default is true (i.e. the case of no operands) - bool ret = true; - // Checking that all oprands are constant - for (auto &op : instruction.operands()) - { + bool QubitAllocationAnalysisAnalytics::operandsConstant(Instruction const& instruction) const + { + // Default is true (i.e. the case of no operands) + bool ret = true; - // An operand is constant if its value was previously generated from - // a const expression ... - auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); + // Checking that all oprands are constant + for (auto& op : instruction.operands()) + { - // ... or if it is just a compile time constant. Note that we - // delibrately only consider integers. We may expand this - // to other constants once we have function support. - auto cst = llvm::dyn_cast(op); - auto is_constant = (cst != nullptr); + // An operand is constant if its value was previously generated from + // a const expression ... + auto const_arg = constantness_dependencies_.find(op) != constantness_dependencies_.end(); - ret = ret && (const_arg || is_constant); - } + // ... or if it is just a compile time constant. Note that we + // delibrately only consider integers. We may expand this + // to other constants once we have function support. + auto cst = llvm::dyn_cast(op); + auto is_constant = (cst != nullptr); - return ret; -} + ret = ret && (const_arg || is_constant); + } -void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction &instruction) -{ - // Creating arg dependencies - ArgList all_dependencies{}; - for (auto &op : instruction.operands()) - { - // If the operand has dependecies ... - auto it = constantness_dependencies_.find(op); - if (it != constantness_dependencies_.end()) - { - // ... we add these as a dependency for the - // resulting instructions value - for (auto &arg : it->second) - { - all_dependencies.insert(arg); - } + return ret; } - } - - // Adding full list of dependices to the dependency graph - constantness_dependencies_.insert({&instruction, all_dependencies}); -} -void QubitAllocationAnalysisAnalytics::analyseCall(Instruction &instruction) -{ - // Skipping debug code - if (instruction.isDebugOrPseudoInst()) - { - return; - } - - // Recovering the call information - auto *call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - return; - } - - // Getting the name of the function being called - auto target_function = call_instr->getCalledFunction(); - auto name = target_function->getName(); - - // TODO(tfr): Make use of TargetLibraryInfo - if (name != "__quantum__rt__qubit_allocate_array") - { - return; - } - - // We expect only a single argument with the number - // of qubits allocated - if (call_instr->arg_size() != 1) - { - llvm::errs() << "Expected exactly one argument\n"; - return; - } - - // Next we extract the argument ... - auto argument = call_instr->getArgOperand(0); - if (argument == nullptr) - { - llvm::errs() << "Failed getting the size argument\n"; - return; - } - - // ... 
and checks whether it is a result of a dependant - // const expression - auto it = constantness_dependencies_.find(argument); - if (it != constantness_dependencies_.end()) - { - // If it is, we add the details to the result list - QubitArray qubit_array; - qubit_array.is_possibly_static = true; - qubit_array.variable_name = instruction.getName().str(); - qubit_array.depends_on = it->second; - - // Pushing to the result - results_.push_back(std::move(qubit_array)); - return; - } - - // Otherwise, it may be a static allocation based on a constant (or - // folded constant) - auto cst = llvm::dyn_cast(argument); - if (cst != nullptr) - { - QubitArray qubit_array; - qubit_array.is_possibly_static = true; - qubit_array.variable_name = instruction.getName().str(); - qubit_array.size = cst->getZExtValue(); - - // Pushing to the result - results_.push_back(std::move(qubit_array)); - - return; - } - - // If neither of the previous is the case, we are dealing with a non-static array - QubitArray qubit_array; - qubit_array.is_possibly_static = false; - qubit_array.variable_name = instruction.getName().str(); - - // Storing the result - results_.push_back(std::move(qubit_array)); -} - -void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function &function) -{ - // Clearing results generated in a previous run - results_.clear(); - constantness_dependencies_.clear(); - - // Creating a list with function arguments - for (auto &arg : function.args()) - { - auto s = arg.getName().str(); - constantness_dependencies_.insert({&arg, {s}}); - } - - // Evaluating all expressions - for (auto &basic_block : function) - { - for (auto &instruction : basic_block) + void QubitAllocationAnalysisAnalytics::markPossibleConstant(Instruction& instruction) { - auto opcode = instruction.getOpcode(); - switch (opcode) - { - case llvm::Instruction::Sub: - case llvm::Instruction::Add: - case llvm::Instruction::Mul: - case llvm::Instruction::Shl: - case llvm::Instruction::LShr: - case llvm::Instruction::AShr: - case llvm::Instruction::And: - case llvm::Instruction::Or: - case llvm::Instruction::Xor: - if (operandsConstant(instruction)) + // Creating arg dependencies + ArgList all_dependencies{}; + for (auto& op : instruction.operands()) { - markPossibleConstant(instruction); + // If the operand has dependecies ... + auto it = constantness_dependencies_.find(op); + if (it != constantness_dependencies_.end()) + { + // ... 
we add these as a dependency for the + // resulting instructions value + for (auto& arg : it->second) + { + all_dependencies.insert(arg); + } + } } - break; - case llvm::Instruction::Call: - analyseCall(instruction); - break; - // Unanalysed statements - case llvm::Instruction::Ret: - case llvm::Instruction::Br: - case llvm::Instruction::Switch: - case llvm::Instruction::IndirectBr: - case llvm::Instruction::Invoke: - case llvm::Instruction::Resume: - case llvm::Instruction::Unreachable: - case llvm::Instruction::CleanupRet: - case llvm::Instruction::CatchRet: - case llvm::Instruction::CatchSwitch: - case llvm::Instruction::CallBr: - case llvm::Instruction::FNeg: - case llvm::Instruction::FAdd: - case llvm::Instruction::FSub: - case llvm::Instruction::FMul: - case llvm::Instruction::UDiv: - case llvm::Instruction::SDiv: - case llvm::Instruction::FDiv: - case llvm::Instruction::URem: - case llvm::Instruction::SRem: - case llvm::Instruction::FRem: - case llvm::Instruction::Alloca: - case llvm::Instruction::Load: - case llvm::Instruction::Store: - case llvm::Instruction::GetElementPtr: - case llvm::Instruction::Fence: - case llvm::Instruction::AtomicCmpXchg: - case llvm::Instruction::AtomicRMW: - case llvm::Instruction::Trunc: - case llvm::Instruction::ZExt: - case llvm::Instruction::SExt: - case llvm::Instruction::FPToUI: - case llvm::Instruction::FPToSI: - case llvm::Instruction::UIToFP: - case llvm::Instruction::SIToFP: - case llvm::Instruction::FPTrunc: - case llvm::Instruction::FPExt: - case llvm::Instruction::PtrToInt: - case llvm::Instruction::IntToPtr: - case llvm::Instruction::BitCast: - case llvm::Instruction::AddrSpaceCast: - case llvm::Instruction::CleanupPad: - case llvm::Instruction::CatchPad: - case llvm::Instruction::ICmp: - case llvm::Instruction::FCmp: - case llvm::Instruction::PHI: - case llvm::Instruction::Select: - case llvm::Instruction::UserOp1: - case llvm::Instruction::UserOp2: - case llvm::Instruction::VAArg: - case llvm::Instruction::ExtractElement: - case llvm::Instruction::InsertElement: - case llvm::Instruction::ShuffleVector: - case llvm::Instruction::ExtractValue: - case llvm::Instruction::InsertValue: - case llvm::Instruction::LandingPad: - // End of Binary Ops - default: - break; - } + + // Adding full list of dependices to the dependency graph + constantness_dependencies_.insert({&instruction, all_dependencies}); } - } -} -QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( - llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/) -{ - // Running functin analysis - analyseFunction(function); + void QubitAllocationAnalysisAnalytics::analyseCall(Instruction& instruction) + { + // Skipping debug code + if (instruction.isDebugOrPseudoInst()) + { + return; + } - // ... and return the result. 
- return results_; -} + // Recovering the call information + auto* call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + return; + } -QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream) - : out_stream_(out_stream) -{} + // Getting the name of the function being called + auto target_function = call_instr->getCalledFunction(); + auto name = target_function->getName(); -llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) -{ - auto &results = fam.getResult(function); - - if (!results.empty()) - { - out_stream_ << function.getName() << "\n"; - out_stream_ << "====================" - << "\n\n"; - for (auto const &ret : results) + // TODO(tfr): Make use of TargetLibraryInfo + if (name != "__quantum__rt__qubit_allocate_array") + { + return; + } + + // We expect only a single argument with the number + // of qubits allocated + if (call_instr->arg_size() != 1) + { + llvm::errs() << "Expected exactly one argument\n"; + return; + } + + // Next we extract the argument ... + auto argument = call_instr->getArgOperand(0); + if (argument == nullptr) + { + llvm::errs() << "Failed getting the size argument\n"; + return; + } + + // ... and checks whether it is a result of a dependant + // const expression + auto it = constantness_dependencies_.find(argument); + if (it != constantness_dependencies_.end()) + { + // If it is, we add the details to the result list + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + qubit_array.depends_on = it->second; + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + return; + } + + // Otherwise, it may be a static allocation based on a constant (or + // folded constant) + auto cst = llvm::dyn_cast(argument); + if (cst != nullptr) + { + QubitArray qubit_array; + qubit_array.is_possibly_static = true; + qubit_array.variable_name = instruction.getName().str(); + qubit_array.size = cst->getZExtValue(); + + // Pushing to the result + results_.push_back(std::move(qubit_array)); + + return; + } + + // If neither of the previous is the case, we are dealing with a non-static array + QubitArray qubit_array; + qubit_array.is_possibly_static = false; + qubit_array.variable_name = instruction.getName().str(); + + // Storing the result + results_.push_back(std::move(qubit_array)); + } + + void QubitAllocationAnalysisAnalytics::analyseFunction(llvm::Function& function) { - if (!ret.is_possibly_static) - { - out_stream_ << ret.variable_name << " is dynamic.\n"; - } - else - { - if (ret.depends_on.empty()) + // Clearing results generated in a previous run + results_.clear(); + constantness_dependencies_.clear(); + + // Creating a list with function arguments + for (auto& arg : function.args()) { - out_stream_ << ret.variable_name << " is trivially static with " << ret.size - << " qubits."; + auto s = arg.getName().str(); + constantness_dependencies_.insert({&arg, {s}}); } - else + + // Evaluating all expressions + for (auto& basic_block : function) { - out_stream_ << ret.variable_name << " depends on "; - bool first = true; - for (auto &x : ret.depends_on) - { - if (!first) + for (auto& instruction : basic_block) { - out_stream_ << ", "; + auto opcode = instruction.getOpcode(); + switch (opcode) + { + case llvm::Instruction::Sub: + case llvm::Instruction::Add: + case llvm::Instruction::Mul: + case llvm::Instruction::Shl: + case llvm::Instruction::LShr: + 
case llvm::Instruction::AShr: + case llvm::Instruction::And: + case llvm::Instruction::Or: + case llvm::Instruction::Xor: + if (operandsConstant(instruction)) + { + markPossibleConstant(instruction); + } + break; + case llvm::Instruction::Call: + analyseCall(instruction); + break; + // Unanalysed statements + case llvm::Instruction::Ret: + case llvm::Instruction::Br: + case llvm::Instruction::Switch: + case llvm::Instruction::IndirectBr: + case llvm::Instruction::Invoke: + case llvm::Instruction::Resume: + case llvm::Instruction::Unreachable: + case llvm::Instruction::CleanupRet: + case llvm::Instruction::CatchRet: + case llvm::Instruction::CatchSwitch: + case llvm::Instruction::CallBr: + case llvm::Instruction::FNeg: + case llvm::Instruction::FAdd: + case llvm::Instruction::FSub: + case llvm::Instruction::FMul: + case llvm::Instruction::UDiv: + case llvm::Instruction::SDiv: + case llvm::Instruction::FDiv: + case llvm::Instruction::URem: + case llvm::Instruction::SRem: + case llvm::Instruction::FRem: + case llvm::Instruction::Alloca: + case llvm::Instruction::Load: + case llvm::Instruction::Store: + case llvm::Instruction::GetElementPtr: + case llvm::Instruction::Fence: + case llvm::Instruction::AtomicCmpXchg: + case llvm::Instruction::AtomicRMW: + case llvm::Instruction::Trunc: + case llvm::Instruction::ZExt: + case llvm::Instruction::SExt: + case llvm::Instruction::FPToUI: + case llvm::Instruction::FPToSI: + case llvm::Instruction::UIToFP: + case llvm::Instruction::SIToFP: + case llvm::Instruction::FPTrunc: + case llvm::Instruction::FPExt: + case llvm::Instruction::PtrToInt: + case llvm::Instruction::IntToPtr: + case llvm::Instruction::BitCast: + case llvm::Instruction::AddrSpaceCast: + case llvm::Instruction::CleanupPad: + case llvm::Instruction::CatchPad: + case llvm::Instruction::ICmp: + case llvm::Instruction::FCmp: + case llvm::Instruction::PHI: + case llvm::Instruction::Select: + case llvm::Instruction::UserOp1: + case llvm::Instruction::UserOp2: + case llvm::Instruction::VAArg: + case llvm::Instruction::ExtractElement: + case llvm::Instruction::InsertElement: + case llvm::Instruction::ShuffleVector: + case llvm::Instruction::ExtractValue: + case llvm::Instruction::InsertValue: + case llvm::Instruction::LandingPad: + // End of Binary Ops + default: + break; + } } - out_stream_ << x; - first = false; - } - out_stream_ << " being constant to be static."; } - } + } - out_stream_ << "\n"; + QubitAllocationAnalysisAnalytics::Result QubitAllocationAnalysisAnalytics::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& /*unused*/) + { + // Running functin analysis + analyseFunction(function); + + // ... and return the result. 
+ return results_; } - } - return llvm::PreservedAnalyses::all(); -} + QubitAllocationAnalysisPrinter::QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream) + : out_stream_(out_stream) + { + } -bool QubitAllocationAnalysisPrinter::isRequired() -{ - return true; -} + llvm::PreservedAnalyses QubitAllocationAnalysisPrinter::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& fam) + { + auto& results = fam.getResult(function); + + if (!results.empty()) + { + out_stream_ << function.getName() << "\n"; + out_stream_ << "====================" + << "\n\n"; + for (auto const& ret : results) + { + if (!ret.is_possibly_static) + { + out_stream_ << ret.variable_name << " is dynamic.\n"; + } + else + { + if (ret.depends_on.empty()) + { + out_stream_ << ret.variable_name << " is trivially static with " << ret.size << " qubits."; + } + else + { + out_stream_ << ret.variable_name << " depends on "; + bool first = true; + for (auto& x : ret.depends_on) + { + if (!first) + { + out_stream_ << ", "; + } + out_stream_ << x; + first = false; + } + out_stream_ << " being constant to be static."; + } + } + + out_stream_ << "\n"; + } + } + + return llvm::PreservedAnalyses::all(); + } + + bool QubitAllocationAnalysisPrinter::isRequired() + { + return true; + } -llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; + llvm::AnalysisKey QubitAllocationAnalysisAnalytics::Key; -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp index a3ca7382b2..215388be56 100644 --- a/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp +++ b/src/Passes/libs/QubitAllocationAnalysis/QubitAllocationAnalysis.hpp @@ -7,108 +7,109 @@ #include #include -namespace microsoft { -namespace quantum { - -class QubitAllocationAnalysisAnalytics - : public llvm::AnalysisInfoMixin +namespace microsoft { -public: - using String = std::string; - using ArgList = std::unordered_set; - - struct QubitArray - { - bool is_possibly_static{false}; ///< Indicates whether the array is - /// possibly static or not - /// - String variable_name{}; ///< Name of the qubit array - ArgList depends_on{}; ///< Function arguments that - /// determines if it is constant or not - /// - uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. 
- }; - - using Value = llvm::Value; - using DependencyGraph = std::unordered_map; - using ValueDependencyGraph = std::unordered_map; - - using Instruction = llvm::Instruction; - using Function = llvm::Function; - using Result = std::vector; - - /// Constructors and destructors - /// @{ - QubitAllocationAnalysisAnalytics() = default; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const &) = delete; - QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics &&) = default; - ~QubitAllocationAnalysisAnalytics() = default; - /// @} - - /// Operators - /// @{ - QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics const &) = delete; - QubitAllocationAnalysisAnalytics &operator=(QubitAllocationAnalysisAnalytics &&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - Result run(llvm::Function &function, llvm::FunctionAnalysisManager & /*unused*/); - /// @} - - /// Function analysis - /// @{ - void analyseFunction(llvm::Function &function); - /// @} - - /// Instruction analysis - /// @{ - bool operandsConstant(Instruction const &instruction) const; - void markPossibleConstant(Instruction &instruction); - void analyseCall(Instruction &instruction); - /// @} - -private: - static llvm::AnalysisKey Key; // NOLINT - friend struct llvm::AnalysisInfoMixin; - - /// Analysis details - /// @{ - ValueDependencyGraph constantness_dependencies_{}; - /// @} - - /// Result - /// @{ - Result results_{}; - /// @} -}; - -class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin +namespace quantum { -public: - /// Constructors and destructors - /// @{ - explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream &out_stream); - QubitAllocationAnalysisPrinter() = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const &) = delete; - QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter &&) = default; - ~QubitAllocationAnalysisPrinter() = default; - /// @} - - /// Operators - /// @{ - QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter const &) = delete; - QubitAllocationAnalysisPrinter &operator=(QubitAllocationAnalysisPrinter &&) = delete; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} -private: - llvm::raw_ostream &out_stream_; -}; - -} // namespace quantum -} // namespace microsoft + + class QubitAllocationAnalysisAnalytics : public llvm::AnalysisInfoMixin + { + public: + using String = std::string; + using ArgList = std::unordered_set; + + struct QubitArray + { + bool is_possibly_static{false}; ///< Indicates whether the array is + /// possibly static or not + /// + String variable_name{}; ///< Name of the qubit array + ArgList depends_on{}; ///< Function arguments that + /// determines if it is constant or not + /// + uint64_t size{static_cast(-1)}; ///< Size of the array if it can be deduced. 
+ }; + + using Value = llvm::Value; + using DependencyGraph = std::unordered_map; + using ValueDependencyGraph = std::unordered_map; + + using Instruction = llvm::Instruction; + using Function = llvm::Function; + using Result = std::vector; + + /// Constructors and destructors + /// @{ + QubitAllocationAnalysisAnalytics() = default; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics const&) = delete; + QubitAllocationAnalysisAnalytics(QubitAllocationAnalysisAnalytics&&) = default; + ~QubitAllocationAnalysisAnalytics() = default; + /// @} + + /// Operators + /// @{ + QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics const&) = delete; + QubitAllocationAnalysisAnalytics& operator=(QubitAllocationAnalysisAnalytics&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + Result run(llvm::Function& function, llvm::FunctionAnalysisManager& /*unused*/); + /// @} + + /// Function analysis + /// @{ + void analyseFunction(llvm::Function& function); + /// @} + + /// Instruction analysis + /// @{ + bool operandsConstant(Instruction const& instruction) const; + void markPossibleConstant(Instruction& instruction); + void analyseCall(Instruction& instruction); + /// @} + + private: + static llvm::AnalysisKey Key; // NOLINT + friend struct llvm::AnalysisInfoMixin; + + /// Analysis details + /// @{ + ValueDependencyGraph constantness_dependencies_{}; + /// @} + + /// Result + /// @{ + Result results_{}; + /// @} + }; + + class QubitAllocationAnalysisPrinter : public llvm::PassInfoMixin + { + public: + /// Constructors and destructors + /// @{ + explicit QubitAllocationAnalysisPrinter(llvm::raw_ostream& out_stream); + QubitAllocationAnalysisPrinter() = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter const&) = delete; + QubitAllocationAnalysisPrinter(QubitAllocationAnalysisPrinter&&) = default; + ~QubitAllocationAnalysisPrinter() = default; + /// @} + + /// Operators + /// @{ + QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter const&) = delete; + QubitAllocationAnalysisPrinter& operator=(QubitAllocationAnalysisPrinter&&) = delete; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + private: + llvm::raw_ostream& out_stream_; + }; + +} // namespace quantum +} // namespace microsoft From 436f5445cc0e0155a1c736410d1ed178862f9bee Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 13:55:05 +0200 Subject: [PATCH 41/48] Adding documentation as per request --- src/Passes/docs/continous-integration.md | 91 ++++++++++++++++++++++++ src/Passes/docs/library-structure.md | 38 ++++++++++ 2 files changed, 129 insertions(+) create mode 100644 src/Passes/docs/continous-integration.md create mode 100644 src/Passes/docs/library-structure.md diff --git a/src/Passes/docs/continous-integration.md b/src/Passes/docs/continous-integration.md new file mode 100644 index 0000000000..08d05c0738 --- /dev/null +++ b/src/Passes/docs/continous-integration.md @@ -0,0 +1,91 @@ +# Continuous integration + +This component is the largest part of this PR. The continuous integration component includes: + +1. Style formatting to ensure that everything looks the same. This includes checking that relevant copyrights are in place. +2. Static analysis +3. Unit testing + +The automatic style enforcement is configurable with the ability to easily add or remove rules. 
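+
+To illustrate what such a rule looks like, the sketch below shows a hypothetical check written in the same spirit as the `enforce_*` callables listed in the pipeline definition that follows. The function name `check_todo_owner` and its `(path, contents)` parameters are assumptions made for illustration only; the exact interface expected by the style checker is whatever the project's own tooling defines.
+
+```python
+import re
+
+
+# Hypothetical pipeline rule (not part of the real tooling): flag TODO comments
+# that do not name an owner, i.e. "TODO:" rather than "TODO(name):".
+# Returns True when the file passes the check.
+def check_todo_owner(path: str, contents: str) -> bool:
+    ok = True
+    for lineno, line in enumerate(contents.splitlines(), start=1):
+        if re.search(r"\bTODO(?!\()", line):
+            print(f"{path}:{lineno}: TODO comment without an owner")
+            ok = False
+    return ok
+```
+
+Removing a requirement is equally local: deleting the corresponding callable from the relevant pipeline list is enough.
+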
Currently the source pipelines are defined as: + +```python +SOURCE_PIPELINES = [ + { + "name": "C++ Main", + "src": path.join(PROJECT_ROOT, "libs"), + + "pipelines": { + "hpp": [ + require_pragma_once, + enforce_cpp_license, + enforce_formatting + ], + "cpp": [ + enforce_cpp_license, + enforce_formatting + ] + } + }, + # ... +] +``` + +This part defines pipelines for `.hpp` files and `.cpp` files allowing the developer to add such requirements such as having copyright in the op of the source file and ensure that formatting follows that given by `.clang-format`. + +Each of these CI stages can executed individually using `./manage` or you can run the entire CI process by invoking `./manage runci`. An example of what this may look like is here: + +```zsh +% ./manage runci + +2021-07-21 14:38:04,896 - FormatChecker - ERROR - /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp was not correctly formatted. +2021-07-21 14:38:04,899 - FormatChecker - ERROR - Your code did not pass formatting. + +% ./manage stylecheck --fix-issues +% ./manage runci + +-- Found LLVM 11.1.0 +-- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug +Consolidate compiler generated dependencies of target QSharpPasses +[ 50%] Building CXX object CMakeFiles/QSharpPasses.dir/src/OpsCounter/OpsCounter.cpp.o +[100%] Linking CXX shared library libQSharpPasses.dylib +ld: warning: directory not found for option '-L/usr/local/opt/llvm/lib' +[100%] Built target QSharpPasses +/Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp:29:7: error: invalid case style for class 'LegacyOpsCounterPass' [readability-identifier-naming,-warnings-as-errors] +class LegacyOpsCounterPass : public FunctionPass + ^~~~~~~~~~~~~~~~~~~~ + CLegacyOpsCounterPass +113345 warnings generated. +Suppressed 113345 warnings (113344 in non-user code, 1 NOLINT). +Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well. +1 warning treated as error +2021-07-21 14:38:40,191 - Linter - ERROR - /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp failed static analysis + +# ISSUES FIXED MANUALLY +% ./manage runci + +-- Found LLVM 11.1.0 +-- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug +Consolidate compiler generated dependencies of target QSharpPasses +[ 50%] Building CXX object CMakeFiles/QSharpPasses.dir/src/OpsCounter/OpsCounter.cpp.o +[100%] Linking CXX shared library libQSharpPasses.dylib +ld: warning: directory not found for option '-L/usr/local/opt/llvm/lib' +[100%] Built target QSharpPasses +-- Found LLVM 11.1.0 +-- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug +Consolidate compiler generated dependencies of target QSharpPasses +[100%] Built target QSharpPasses +********************************* +No test configuration file found! 
+********************************* +``` + +The key idea here is to make it extremely easy to be complaint with the style guide, correct any issues that might come as a result of static analysis and at the same time enforce this when a PR is made. diff --git a/src/Passes/docs/library-structure.md b/src/Passes/docs/library-structure.md new file mode 100644 index 0000000000..d1977cb0e1 --- /dev/null +++ b/src/Passes/docs/library-structure.md @@ -0,0 +1,38 @@ +# Library structure for passes + +An important part of this PR is that it proposes a structure for passes: It is suggested that each pass has their own subcode base. The reason for this proposal is that it makes it very easy to add and remove passes as well as decide which passes to link against. Each pass is kept in its own subdirectory under `libs`: + +``` +libs +├── CMakeLists.txt +└── OpsCounter + ├── OpsCounter.cpp + └── OpsCounter.hpp +``` + +Adding a new pass is easy using the `manage` tool developed in this PR: + +``` +% ./manage create-pass HelloWorld +Available templates: + +1. Function Pass + +Select a template:1 +``` + +which results in a new pass code in the `libs`: + +``` +libs +├── CMakeLists.txt +├── HelloWorld +│ ├── HelloWorld.cpp +│ ├── HelloWorld.hpp +│ └── SPECIFICATION.md +└── OpsCounter + ├── OpsCounter.cpp + └── OpsCounter.hpp +``` + +A full example of how to create a basic function pass is included in the README.md file for anyone interested. From ee7a0972f638c58c826c6e2013ce91e9f8c87793 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Tue, 27 Jul 2021 13:59:49 +0200 Subject: [PATCH 42/48] Minor documentation update --- src/Passes/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Passes/README.md b/src/Passes/README.md index e8107da1e8..6419c757b0 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -185,7 +185,7 @@ and then make your target make [target] ``` -Valid targets are the name of the folders in `libs/` found in the passes root. +The default target is `all`. Other valid targets are the name of the folders in `libs/` found in the passes root. ## Running a pass From 162d5ce7070db1bc55495de945ba67de375fdcc2 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Tue, 27 Jul 2021 15:35:42 +0200 Subject: [PATCH 43/48] Adding lit tests --- src/Passes/CMakeLists.txt | 2 +- src/Passes/docs/continous-integration.md | 14 +++++ src/Passes/requirements.txt | 1 + src/Passes/site-packages/TasksCI/builder.py | 15 +++++- src/Passes/tests/CMakeLists.txt | 10 ++++ .../tests/QubitAllocationAnalysis/case1.ll | 15 ++++++ .../tests/QubitAllocationAnalysis/case2.ll | 14 +++++ .../inputs/static-qubit-arrays-1.ll | 51 +++++++++++++++++++ .../inputs/static-qubit-arrays-2.ll} | 0 src/Passes/tests/lit.cfg.py | 37 ++++++++++++++ src/Passes/tests/lit.site.cfg.py.in | 15 ++++++ 11 files changed, 172 insertions(+), 2 deletions(-) create mode 100644 src/Passes/tests/CMakeLists.txt create mode 100644 src/Passes/tests/QubitAllocationAnalysis/case1.ll create mode 100644 src/Passes/tests/QubitAllocationAnalysis/case2.ll create mode 100644 src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll rename src/Passes/{examples/QubitAllocationAnalysis/analysis-example.ll => tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.ll} (100%) create mode 100644 src/Passes/tests/lit.cfg.py create mode 100644 src/Passes/tests/lit.site.cfg.py.in diff --git a/src/Passes/CMakeLists.txt b/src/Passes/CMakeLists.txt index 0a55f495dd..f7ccafaa22 100644 --- a/src/Passes/CMakeLists.txt +++ b/src/Passes/CMakeLists.txt @@ -46,4 +46,4 @@ include_directories(${CMAKE_SOURCE_DIR}/src) # Adding the libraries add_subdirectory(libs) - +add_subdirectory(tests) \ No newline at end of file diff --git a/src/Passes/docs/continous-integration.md b/src/Passes/docs/continous-integration.md index 08d05c0738..30cc273ef5 100644 --- a/src/Passes/docs/continous-integration.md +++ b/src/Passes/docs/continous-integration.md @@ -1,3 +1,17 @@ +# Running tests + +In order to run the tests, you first need to build the library. Assuming that this is already done and the corresponding build is in `Debug/`, run the tests from the `Debug` folder: + +``` +% lit tests/ -v +-- Testing: 2 tests, 2 workers -- +PASS: Quantum-Passes :: QubitAllocationAnalysis/case1.ll (1 of 2) +PASS: Quantum-Passes :: QubitAllocationAnalysis/case2.ll (2 of 2) + +Testing Time: 0.27s + Passed: 2 +``` + # Continuous integration This component is the largest part of this PR. The continuous integration component includes: diff --git a/src/Passes/requirements.txt b/src/Passes/requirements.txt index 77c1d85ae8..b937a83e88 100644 --- a/src/Passes/requirements.txt +++ b/src/Passes/requirements.txt @@ -1 +1,2 @@ click==8.0.1 +lit==12.0.1 diff --git a/src/Passes/site-packages/TasksCI/builder.py b/src/Passes/site-packages/TasksCI/builder.py index bd6b574b04..f11df780b9 100644 --- a/src/Passes/site-packages/TasksCI/builder.py +++ b/src/Passes/site-packages/TasksCI/builder.py @@ -64,7 +64,17 @@ def run_tests(build_dir: str, concurrency=None): """ Runs the unit tests given a build directory. 
""" + fail = False + # Running lit tests + lit_cmd = ["lit", "tests/", "-v"] + exit_code = subprocess.call(lit_cmd, cwd=build_dir) + + if exit_code != 0: + logger.error('Lit test failed') + fail = True + + # Running CMake tests cmake_cmd = [toolchain.discover_ctest()] if concurrency is not None: @@ -72,7 +82,10 @@ def run_tests(build_dir: str, concurrency=None): exit_code = subprocess.call(cmake_cmd, cwd=build_dir) if exit_code != 0: - logger.error('Failed to configure project') + logger.error('CTest failed project') + fail = True + + if fail: sys.exit(exit_code) diff --git a/src/Passes/tests/CMakeLists.txt b/src/Passes/tests/CMakeLists.txt new file mode 100644 index 0000000000..60d980f867 --- /dev/null +++ b/src/Passes/tests/CMakeLists.txt @@ -0,0 +1,10 @@ +set(LT_TEST_SHLIBEXT "${CMAKE_SHARED_LIBRARY_SUFFIX}") + +set(LT_TEST_SITE_CFG_INPUT "${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in") +set(LT_TEST_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}") + +set(LIT_SITE_CFG_IN_HEADER "## Autogenerated from ${LT_TEST_SITE_CFG_INPUT}\n## Do not edit!") + +configure_file("${LT_TEST_SITE_CFG_INPUT}" + "${CMAKE_CURRENT_BINARY_DIR}/lit.cfg.py" @ONLY +) diff --git a/src/Passes/tests/QubitAllocationAnalysis/case1.ll b/src/Passes/tests/QubitAllocationAnalysis/case1.ll new file mode 100644 index 0000000000..cab5557980 --- /dev/null +++ b/src/Passes/tests/QubitAllocationAnalysis/case1.ll @@ -0,0 +1,15 @@ +; RUN: opt -load-pass-plugin %shlibdir/libQubitAllocationAnalysis%shlibext -passes="print" %S/inputs/static-qubit-arrays-1.ll -disable-output 2>&1\ +; RUN: | FileCheck %s + +;------------------------------------------------------------------------------ +; EXPECTED OUTPUT +;------------------------------------------------------------------------------ + +; CHECK: Example__QuantumProgram__body +; CHECK: ==================== + +; CHECK: qubits depends on x being constant to be static. + + + + diff --git a/src/Passes/tests/QubitAllocationAnalysis/case2.ll b/src/Passes/tests/QubitAllocationAnalysis/case2.ll new file mode 100644 index 0000000000..7f90c61a50 --- /dev/null +++ b/src/Passes/tests/QubitAllocationAnalysis/case2.ll @@ -0,0 +1,14 @@ +; RUN: opt -load-pass-plugin %shlibdir/libQubitAllocationAnalysis%shlibext -passes="print" %S/inputs/static-qubit-arrays-2.ll -disable-output 2>&1\ +; RUN: | FileCheck %s + +;------------------------------------------------------------------------------ +; EXPECTED OUTPUT +;------------------------------------------------------------------------------ + +; CHECK: Example__QuantumProgram__body +; CHECK: ==================== +; CHECK: qubits0 is trivially static with 9 qubits. +; CHECK: qubits1 depends on x being constant to be static. +; CHECK: qubits2 depends on x, g being constant to be static. +; CHECK: qubits3 depends on h being constant to be static. +; CHECK: qubits4 is dynamic. 
diff --git a/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll new file mode 100644 index 0000000000..ea4ead0400 --- /dev/null +++ b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-1.ll @@ -0,0 +1,51 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3) + call fastcc void @Example__QuantumProgram__body(i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x) unnamed_addr { +entry: + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %x) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #0 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.ll similarity index 100% rename from src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll rename to src/Passes/tests/QubitAllocationAnalysis/inputs/static-qubit-arrays-2.ll diff --git a/src/Passes/tests/lit.cfg.py b/src/Passes/tests/lit.cfg.py new file mode 100644 index 0000000000..68e1c797f7 --- /dev/null +++ b/src/Passes/tests/lit.cfg.py @@ -0,0 +1,37 @@ +# -*- Python -*- +import platform +import lit.formats +from lit.llvm import llvm_config +from lit.llvm.subst import ToolSubst +import shutil + +config.llvm_tools_dir = os.path.dirname(shutil.which("opt")) +config.name = 'Quantum-Passes' +config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell) +config.suffixes = ['.ll'] +config.test_source_root = os.path.dirname(__file__) +config.excludes = ['inputs', "*/inputs", "**/inputs"] + +if platform.system() == 'Darwin': + tool_substitutions = [ + ToolSubst('%clang', "clang", + extra_args=["-isysroot", + "`xcrun --show-sdk-path`", + "-mlinker-version=0"]), + ] +else: + tool_substitutions = [ + ToolSubst('%clang', "clang", + ) + ] +llvm_config.add_tool_substitutions(tool_substitutions) +tools = ["opt", "lli", "not", "FileCheck", "clang"] +llvm_config.add_tool_substitutions(tools, config.llvm_tools_dir) +config.substitutions.append(('%shlibext', config.llvm_shlib_ext)) +config.substitutions.append(('%shlibdir', 
config.llvm_shlib_dir)) + + +# References: +# https://github.com/banach-space/llvm-tutor +# http://lists.llvm.org/pipermail/cfe-dev/2016-July/049868.html +# https://github.com/Homebrew/homebrew-core/issues/52461 diff --git a/src/Passes/tests/lit.site.cfg.py.in b/src/Passes/tests/lit.site.cfg.py.in new file mode 100644 index 0000000000..c6888bfbf2 --- /dev/null +++ b/src/Passes/tests/lit.site.cfg.py.in @@ -0,0 +1,15 @@ +import sys + +config.llvm_tools_dir = "@LT_LLVM_INSTALL_DIR@/bin" +config.llvm_shlib_ext = "@LT_TEST_SHLIBEXT@" +config.llvm_shlib_dir = "@CMAKE_BINARY_DIR@/libs" + +import lit.llvm +# lit_config is a global instance of LitConfig +lit.llvm.initialize(lit_config, config) + +# test_exec_root: The root path where tests should be run. +config.test_exec_root = os.path.join("@CMAKE_CURRENT_BINARY_DIR@") + +# Let the main config do the real work. +lit_config.load_config(config, "@LT_TEST_SRC_DIR@/lit.cfg.py") From c5a1e84b8737fe0c61a74f9e132703048b34a0df Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Wed, 28 Jul 2021 16:35:23 +0200 Subject: [PATCH 44/48] Finalising expansion pass --- .../ConstSizeArray/ConstSizeArray.qs | 7 +- .../examples/QubitAllocationAnalysis/Makefile | 20 +- .../examples/QubitAllocationAnalysis/test.txt | 199 ++++++++++++++++ src/Passes/include/Llvm.hpp | 16 +- .../ExpandStaticAllocation.cpp | 223 ++++++++++++++++++ .../ExpandStaticAllocation.hpp | 46 ++++ .../LibExpandStaticAllocation.cpp | 38 +++ .../ExpandStaticAllocation/SPECIFICATION.md | 1 + 8 files changed, 544 insertions(+), 6 deletions(-) create mode 100644 src/Passes/examples/QubitAllocationAnalysis/test.txt create mode 100644 src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp create mode 100644 src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp create mode 100644 src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp create mode 100644 src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md diff --git a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs index c8c78d1aea..0b5655ddec 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs +++ b/src/Passes/examples/QubitAllocationAnalysis/ConstSizeArray/ConstSizeArray.qs @@ -2,8 +2,9 @@ namespace Example { @EntryPoint() operation Main() : Int { + QuantumProgram(3,2,1); - QuantumProgram(4,9,4); + QuantumProgram(4,X(2),4); return 0; } @@ -21,5 +22,9 @@ namespace Example { use qubits2 = Qubit[y - g]; use qubits3 = Qubit[h]; use qubits4 = Qubit[X(x)]; + + for idxIteration in 0..g { + //Message(idxIteration); + } } } \ No newline at end of file diff --git a/src/Passes/examples/QubitAllocationAnalysis/Makefile b/src/Passes/examples/QubitAllocationAnalysis/Makefile index dd808878eb..feedf8753f 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/Makefile +++ b/src/Passes/examples/QubitAllocationAnalysis/Makefile @@ -1,8 +1,20 @@ -run: build analysis-example.ll +run-expand: build-qaa build-esa analysis-example.ll + opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib \ + -load-pass-plugin ../../Debug/libs/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll + + +run: build-qaa analysis-example.ll opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll -build: - pushd ../../ && mkdir -p Debug && cd Debug && cmake .. 
&& make QubitAllocationAnalysis && popd || popd + +build-prepare: + pushd ../../ && mkdir -p Debug && cd Debug && cmake ..&& popd || popd + +build-qaa: build-prepare + pushd ../../Debug && make QubitAllocationAnalysis && popd || popd + +build-esa: build-prepare + pushd ../../Debug && make ExpandStaticAllocation && popd || popd analysis-example.ll: @@ -10,4 +22,4 @@ analysis-example.ll: clean: cd ConstSizeArray && make clean - rm analysis-example.ll \ No newline at end of file + rm analysis-example.ll diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.txt b/src/Passes/examples/QubitAllocationAnalysis/test.txt new file mode 100644 index 0000000000..133f3304be --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/test.txt @@ -0,0 +1,199 @@ +pushd ../../ && mkdir -p Debug && cd Debug && cmake ..&& popd || popd +~/Documents/Projects/qsharp-compiler/src/Passes ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +-- Found LLVM 12.0.1 +-- Using LLVMConfig.cmake in: /usr/local/opt/llvm/lib/cmake/llvm +-- Configuring done +-- Generating done +-- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/Passes/Debug +~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +pushd ../../Debug && make QubitAllocationAnalysis && popd || popd +~/Documents/Projects/qsharp-compiler/src/Passes/Debug ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +Consolidate compiler generated dependencies of target QubitAllocationAnalysis +[100%] Built target QubitAllocationAnalysis +~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +pushd ../../Debug && make ExpandStaticAllocation && popd || popd +~/Documents/Projects/qsharp-compiler/src/Passes/Debug ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +Consolidate compiler generated dependencies of target ExpandStaticAllocation +[100%] Built target ExpandStaticAllocation +~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib \ + -load-pass-plugin ../../Debug/libs/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll +; ModuleID = 'analysis-example.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Array = type opaque +%String = type opaque + +define internal fastcc void @Example__Main__body() unnamed_addr { +entry: + call fastcc void @Example__QuantumProgram__body(i64 3, i64 2, i64 1) + call fastcc void @Example__QuantumProgram__body(i64 4, i64 9, i64 4) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body(i64 %x, i64 %h, i64 %g) unnamed_addr { +entry: + %.neg = xor i64 %x, -1 + %.neg1 = mul i64 %.neg, %x + %z.neg = add i64 %.neg1, 47 + %y = mul i64 %x, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, %g + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %h) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, 
i32 1) + %4 = call fastcc i64 @Example__X__body(i64 %x) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + br label %header__1 + +header__1: ; preds = %header__1, %entry + %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] + %.not = icmp sgt i64 %idxIteration, %g + %5 = add i64 %idxIteration, 1 + br i1 %.not, label %exit__1, label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) + ret void +} + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +; Function Attrs: norecurse nounwind readnone willreturn +define internal fastcc i64 @Example__X__body(i64 %value) unnamed_addr #0 { +entry: + %0 = mul i64 %value, 3 + ret i64 %0 +} + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define i64 @Example__Main__Interop() local_unnamed_addr #1 { +entry: + call fastcc void @Example__Main__body() + ret i64 0 +} + +define void @Example__Main() local_unnamed_addr #2 { +entry: + call fastcc void @Example__Main__body() + %0 = call %String* @__quantum__rt__int_to_string(i64 0) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr + +define internal fastcc void @Example__QuantumProgram__body.1() unnamed_addr { +entry: + %.neg = xor i64 3, -1 + %.neg1 = mul i64 %.neg, 3 + %z.neg = add i64 %.neg1, 47 + %y = mul i64 3, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, 1 + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) + %4 = call fastcc i64 @Example__X__body(i64 3) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + br label %header__1 + +header__1: ; preds = %header__1, %entry + %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] + %.not = icmp sgt i64 
%idxIteration, 1 + %5 = add i64 %idxIteration, 1 + br i1 %.not, label %exit__1, label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) + ret void +} + +define internal fastcc void @Example__QuantumProgram__body.2() unnamed_addr { +entry: + %.neg = xor i64 4, -1 + %.neg1 = mul i64 %.neg, 4 + %z.neg = add i64 %.neg1, 47 + %y = mul i64 4, 3 + %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) + %0 = add i64 %y, -2 + %1 = lshr i64 %0, 1 + %2 = add i64 %z.neg, %1 + %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) + %3 = sub i64 %y, 4 + %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) + %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) + %4 = call fastcc i64 @Example__X__body(i64 4) + %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) + br label %header__1 + +header__1: ; preds = %header__1, %entry + %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] + %.not = icmp sgt i64 %idxIteration, 4 + %5 = add i64 %idxIteration, 1 + br i1 %.not, label %exit__1, label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits4) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits3) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits2) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits0) + ret void +} + +attributes #0 = { norecurse nounwind readnone willreturn } +attributes #1 = { "InteropFriendly" } +attributes #2 = { "EntryPoint" } diff --git a/src/Passes/include/Llvm.hpp b/src/Passes/include/Llvm.hpp index f24aef3726..80a4728b83 100644 --- a/src/Passes/include/Llvm.hpp +++ b/src/Passes/include/Llvm.hpp @@ -27,10 +27,24 @@ #pragma clang diagnostic ignored "-Weverything" #endif -#include "llvm/IR/LegacyPassManager.h" +// Passes #include "llvm/Passes/PassBuilder.h" #include "llvm/Passes/PassPlugin.h" #include "llvm/Support/raw_ostream.h" +#include 
"llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Cloning.h" + +// Building +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Verifier.h" #if defined(__clang__) #pragma clang diagnostic pop diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp new file mode 100644 index 0000000000..154c4f9617 --- /dev/null +++ b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -0,0 +1,223 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace microsoft { +namespace quantum { +llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & function, + llvm::FunctionAnalysisManager &fam) +{ + // Pass body + for (auto &basic_block : function) + { + // Keeping track of instructions to remove in each block + std::vector to_remove; + + for (auto &instruction : basic_block) + { + // Finding calls + auto *call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + continue; + } + + ConstantArguments argument_constants{}; + std::vector remaining_arguments{}; + + auto callee_function = call_instr->getCalledFunction(); + auto &depenency_graph = fam.getResult(*callee_function); + + if (depenency_graph.size() > 0) + { + uint32_t idx = 0; + uint32_t N = static_cast(callee_function->arg_size()); + + // Finding argument constants + while (idx < N) + { + auto arg = callee_function->getArg(idx); + auto value = call_instr->getArgOperand(idx); + + auto cst = llvm::dyn_cast(value); + if (cst != nullptr) + { + argument_constants[arg->getName().str()] = cst; + } + else + { + remaining_arguments.push_back(idx); + } + + ++idx; + } + + // Checking which arrays are constant for this + auto new_callee = ExpandFunctionCall(depenency_graph, *callee_function, argument_constants); + + // Replacing call if a new function was created + if (new_callee != nullptr) + { + llvm::IRBuilder<> builder(call_instr); + (void)call_instr; + + // List with new call arguments + std::vector new_arguments; + for (auto const &i : remaining_arguments) + { + // Getting the i'th argument + llvm::Value *arg = call_instr->getArgOperand(i); + + // Adding arguments that were not constant + if (argument_constants.find(arg->getName().str()) == argument_constants.end()) + { + new_arguments.push_back(arg); + } + } + + // Creating a new call + llvm::Value *new_call = builder.CreateCall(new_callee, new_arguments); + + // Replace all calls to old function with calls to new function + for (auto &use : call_instr->uses()) + { + llvm::User *user = use.getUser(); + user->setOperand(use.getOperandNo(), new_call); + } + + // Schedule original instruction for deletion + to_remove.push_back(&instruction); + } + } + } + + // Removing instructions + for (auto &instruction : to_remove) + { + if (!instruction->use_empty()) + { + instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); + } + instruction->eraseFromParent(); + } + } + + return llvm::PreservedAnalyses::none(); +} + +llvm::Function *ExpandStaticAllocationPass::ExpandFunctionCall( + QubitAllocationResult const &depenency_graph, 
llvm::Function &callee, + ConstantArguments const &const_args) +{ + bool should_replace_function = false; + if (!depenency_graph.empty()) + { + // Checking that any of all allocations in the function + // body becomes static from replacing constant function arguments + for (auto const &allocation : depenency_graph) + { + // Ignoring non-static allocations + if (!allocation.is_possibly_static) + { + continue; + } + + // Ignoring trivial allocations + if (allocation.depends_on.empty()) + { + continue; + } + + // Checking all dependencies are constant + bool all_const = true; + for (auto &name : allocation.depends_on) + { + all_const = all_const && (const_args.find(name) != const_args.end()); + } + + // In case that all dependencies are constant for this + // allocation, we should replace the function with one where + // the arguments are eliminated. + if (all_const) + { + should_replace_function = true; + } + } + } + + // Replacing function if needed + if (should_replace_function) + { + auto module = callee.getParent(); + auto & context = module->getContext(); + llvm::IRBuilder<> builder(context); + + // Copying the original function + llvm::ValueToValueMapTy remapper; + std::vector arg_types; + + // The user might be deleting arguments to the function by specifying them in + // the VMap. If so, we need to not add the arguments to the arg ty vector + // + for (auto const &arg : callee.args()) + { + // Skipping constant arguments + + if (const_args.find(arg.getName().str()) != const_args.end()) + { + continue; + } + + arg_types.push_back(arg.getType()); + } + + // Creating a new function + llvm::FunctionType *function_type = llvm::FunctionType::get( + callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); + auto function = llvm::Function::Create(function_type, callee.getLinkage(), + callee.getAddressSpace(), callee.getName(), module); + + // Copying the non-const arguments + auto dest_args_it = function->arg_begin(); + + for (auto const &arg : callee.args()) + { + auto const_it = const_args.find(arg.getName().str()); + if (const_it == const_args.end()) + { + // Mapping remaining function arguments + dest_args_it->setName(arg.getName()); + remapper[&arg] = &*dest_args_it++; + } + else + { + remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); + } + } + + llvm::SmallVector returns; // Ignore returns cloned. + + // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' + llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); + + verifyFunction(*function); + + return function; + } + + return nullptr; +} + +bool ExpandStaticAllocationPass::isRequired() +{ + return true; +} + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp new file mode 100644 index 0000000000..8166737e27 --- /dev/null +++ b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -0,0 +1,46 @@ +#pragma once +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
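A note on the new `ExpandStaticAllocation.cpp` above: the patch formatting has dropped the template arguments from several declarations (for instance `std::vector` and `llvm::SmallVector` appear without their element types). The following free-standing sketch repeats the cloning step with the types written out; the helper `specializeOnConstant`, which binds only the first argument, is a simplification invented for illustration and is not part of the patch.

```cpp
// Illustrative sketch only: clone `callee` while binding its first argument to a
// constant, mirroring ExpandFunctionCall above with the template arguments
// (std::vector<llvm::Type *>, llvm::SmallVector<llvm::ReturnInst *, 8>) spelled out.
#include "Llvm.hpp"

#include <vector>

llvm::Function *specializeOnConstant(llvm::Function &callee, llvm::ConstantInt *constant)
{
    auto *module  = callee.getParent();
    auto &context = module->getContext();

    llvm::ValueToValueMapTy   remapper;
    std::vector<llvm::Type *> arg_types;

    // Keep every argument type except the first, which the constant will replace.
    for (auto const &arg : callee.args())
    {
        if (arg.getArgNo() != 0)
        {
            arg_types.push_back(arg.getType());
        }
    }

    auto *function_type = llvm::FunctionType::get(
        callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg());
    auto *function = llvm::Function::Create(
        function_type, callee.getLinkage(), callee.getAddressSpace(), callee.getName(), module);

    // Map the old arguments: argument 0 becomes the constant, the rest map one-to-one.
    auto dest_args_it = function->arg_begin();
    for (auto const &arg : callee.args())
    {
        if (arg.getArgNo() == 0)
        {
            remapper[&arg] = llvm::ConstantInt::get(context, constant->getValue());
        }
        else
        {
            dest_args_it->setName(arg.getName());
            remapper[&arg] = &*dest_args_it++;
        }
    }

    llvm::SmallVector<llvm::ReturnInst *, 8> returns;
    llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr);
    llvm::verifyFunction(*function);

    return function;
}
```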
+ +#include "Llvm.hpp" +#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" + +#include + +namespace microsoft { +namespace quantum { + +class ExpandStaticAllocationPass : public llvm::PassInfoMixin +{ +public: + using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; + using ConstantArguments = std::unordered_map; + + /// Constructors and destructors + /// @{ + ExpandStaticAllocationPass() = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass const &) = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass &&) = default; + ~ExpandStaticAllocationPass() = default; + /// @} + + /// Operators + /// @{ + ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass const &) = default; + ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass &&) = default; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); + static bool isRequired(); + /// @} + + /// @{ + llvm::Function *ExpandFunctionCall(QubitAllocationResult const &depenency_graph, + llvm::Function &callee, ConstantArguments const &const_args); + /// @} +}; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp b/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp new file mode 100644 index 0000000000..2475aaf8cd --- /dev/null +++ b/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" + +#include "Llvm.hpp" + +#include +#include + +namespace { +llvm::PassPluginLibraryInfo getExpandStaticAllocationPluginInfo() +{ + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, [](PassBuilder &pb) { + // Registering the pass + pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, + ArrayRef /*unused*/) { + if (name == "expand-static-allocation") + { + fpm.addPass(ExpandStaticAllocationPass()); + return true; + } + + return false; + }); + }}; +} +} // namespace + +// Interface for loading the plugin +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() +{ + return getExpandStaticAllocationPluginInfo(); +} diff --git a/src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md b/src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md new file mode 100644 index 0000000000..5095eea8b3 --- /dev/null +++ b/src/Passes/libs/ExpandStaticAllocation/SPECIFICATION.md @@ -0,0 +1 @@ +# {ExpandStaticAllocation} Specification From 1866d17434397dbc901c8b180b46496d177ffde1 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Wed, 28 Jul 2021 16:47:26 +0200 Subject: [PATCH 45/48] Adding expansion pass to allow allocating arrays statically --- .../ExpandStaticAllocation.cpp | 386 +++++++++--------- .../ExpandStaticAllocation.hpp | 77 ++-- .../LibExpandStaticAllocation.cpp | 46 ++- 3 files changed, 261 insertions(+), 248 deletions(-) diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp index 154c4f9617..5684864d7a 100644 --- a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp +++ b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.cpp @@ -1,223 +1,227 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" - #include "Llvm.hpp" +#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" + #include #include -namespace microsoft { -namespace quantum { -llvm::PreservedAnalyses ExpandStaticAllocationPass::run(llvm::Function & function, - llvm::FunctionAnalysisManager &fam) +namespace microsoft { - // Pass body - for (auto &basic_block : function) - { - // Keeping track of instructions to remove in each block - std::vector to_remove; - - for (auto &instruction : basic_block) +namespace quantum +{ + llvm::PreservedAnalyses ExpandStaticAllocationPass::run( + llvm::Function& function, + llvm::FunctionAnalysisManager& fam) { - // Finding calls - auto *call_instr = llvm::dyn_cast(&instruction); - if (call_instr == nullptr) - { - continue; - } - - ConstantArguments argument_constants{}; - std::vector remaining_arguments{}; - - auto callee_function = call_instr->getCalledFunction(); - auto &depenency_graph = fam.getResult(*callee_function); - - if (depenency_graph.size() > 0) - { - uint32_t idx = 0; - uint32_t N = static_cast(callee_function->arg_size()); - - // Finding argument constants - while (idx < N) + // Pass body + for (auto& basic_block : function) { - auto arg = callee_function->getArg(idx); - auto value = call_instr->getArgOperand(idx); - - auto cst = llvm::dyn_cast(value); - if (cst != nullptr) - { - argument_constants[arg->getName().str()] = cst; - } - else - { - remaining_arguments.push_back(idx); - } - - ++idx; - } - - // Checking which arrays are constant for this - auto new_callee = ExpandFunctionCall(depenency_graph, *callee_function, argument_constants); + // Keeping track of instructions to remove in each block + std::vector to_remove; - // Replacing call if a new function was created - if (new_callee != nullptr) - { - llvm::IRBuilder<> builder(call_instr); - (void)call_instr; - - // List with new call arguments - std::vector new_arguments; - for (auto const &i : remaining_arguments) - { - // Getting the i'th argument - llvm::Value *arg = call_instr->getArgOperand(i); - - // Adding arguments that were not constant - if (argument_constants.find(arg->getName().str()) == argument_constants.end()) + for (auto& instruction : basic_block) { - new_arguments.push_back(arg); + // Finding calls + auto* call_instr = llvm::dyn_cast(&instruction); + if (call_instr == nullptr) + { + continue; + } + + ConstantArguments argument_constants{}; + std::vector remaining_arguments{}; + + auto callee_function = call_instr->getCalledFunction(); + auto& depenency_graph = fam.getResult(*callee_function); + + if (depenency_graph.size() > 0) + { + uint32_t idx = 0; + auto n = static_cast(callee_function->arg_size()); + + // Finding argument constants + while (idx < n) + { + auto arg = callee_function->getArg(idx); 
+ auto value = call_instr->getArgOperand(idx); + + auto cst = llvm::dyn_cast(value); + if (cst != nullptr) + { + argument_constants[arg->getName().str()] = cst; + } + else + { + remaining_arguments.push_back(idx); + } + + ++idx; + } + + // Checking which arrays are constant for this + auto new_callee = expandFunctionCall(depenency_graph, *callee_function, argument_constants); + + // Replacing call if a new function was created + if (new_callee != nullptr) + { + llvm::IRBuilder<> builder(call_instr); + (void)call_instr; + + // List with new call arguments + std::vector new_arguments; + for (auto const& i : remaining_arguments) + { + // Getting the i'th argument + llvm::Value* arg = call_instr->getArgOperand(i); + + // Adding arguments that were not constant + if (argument_constants.find(arg->getName().str()) == argument_constants.end()) + { + new_arguments.push_back(arg); + } + } + + // Creating a new call + llvm::Value* new_call = builder.CreateCall(new_callee, new_arguments); + + // Replace all calls to old function with calls to new function + for (auto& use : call_instr->uses()) + { + llvm::User* user = use.getUser(); + user->setOperand(use.getOperandNo(), new_call); + } + + // Schedule original instruction for deletion + to_remove.push_back(&instruction); + } + } } - } - - // Creating a new call - llvm::Value *new_call = builder.CreateCall(new_callee, new_arguments); - // Replace all calls to old function with calls to new function - for (auto &use : call_instr->uses()) - { - llvm::User *user = use.getUser(); - user->setOperand(use.getOperandNo(), new_call); - } - - // Schedule original instruction for deletion - to_remove.push_back(&instruction); + // Removing instructions + for (auto& instruction : to_remove) + { + if (!instruction->use_empty()) + { + instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); + } + instruction->eraseFromParent(); + } } - } - } - // Removing instructions - for (auto &instruction : to_remove) - { - if (!instruction->use_empty()) - { - instruction->replaceAllUsesWith(llvm::UndefValue::get(instruction->getType())); - } - instruction->eraseFromParent(); + return llvm::PreservedAnalyses::none(); } - } - return llvm::PreservedAnalyses::none(); -} - -llvm::Function *ExpandStaticAllocationPass::ExpandFunctionCall( - QubitAllocationResult const &depenency_graph, llvm::Function &callee, - ConstantArguments const &const_args) -{ - bool should_replace_function = false; - if (!depenency_graph.empty()) - { - // Checking that any of all allocations in the function - // body becomes static from replacing constant function arguments - for (auto const &allocation : depenency_graph) - { - // Ignoring non-static allocations - if (!allocation.is_possibly_static) - { - continue; - } - - // Ignoring trivial allocations - if (allocation.depends_on.empty()) - { - continue; - } - - // Checking all dependencies are constant - bool all_const = true; - for (auto &name : allocation.depends_on) - { - all_const = all_const && (const_args.find(name) != const_args.end()); - } - - // In case that all dependencies are constant for this - // allocation, we should replace the function with one where - // the arguments are eliminated. 
- if (all_const) - { - should_replace_function = true; - } - } - } - - // Replacing function if needed - if (should_replace_function) - { - auto module = callee.getParent(); - auto & context = module->getContext(); - llvm::IRBuilder<> builder(context); - - // Copying the original function - llvm::ValueToValueMapTy remapper; - std::vector arg_types; - - // The user might be deleting arguments to the function by specifying them in - // the VMap. If so, we need to not add the arguments to the arg ty vector - // - for (auto const &arg : callee.args()) + llvm::Function* ExpandStaticAllocationPass::expandFunctionCall( + QubitAllocationResult const& depenency_graph, + llvm::Function& callee, + ConstantArguments const& const_args) { - // Skipping constant arguments + bool should_replace_function = false; + if (!depenency_graph.empty()) + { + // Checking that any of all allocations in the function + // body becomes static from replacing constant function arguments + for (auto const& allocation : depenency_graph) + { + // Ignoring non-static allocations + if (!allocation.is_possibly_static) + { + continue; + } + + // Ignoring trivial allocations + if (allocation.depends_on.empty()) + { + continue; + } + + // Checking all dependencies are constant + bool all_const = true; + for (auto& name : allocation.depends_on) + { + all_const = all_const && (const_args.find(name) != const_args.end()); + } + + // In case that all dependencies are constant for this + // allocation, we should replace the function with one where + // the arguments are eliminated. + if (all_const) + { + should_replace_function = true; + } + } + } - if (const_args.find(arg.getName().str()) != const_args.end()) - { - continue; - } + // Replacing function if needed + if (should_replace_function) + { + auto module = callee.getParent(); + auto& context = module->getContext(); + llvm::IRBuilder<> builder(context); + + // Copying the original function + llvm::ValueToValueMapTy remapper; + std::vector arg_types; + + // The user might be deleting arguments to the function by specifying them in + // the VMap. 
If so, we need to not add the arguments to the arg ty vector + // + for (auto const& arg : callee.args()) + { + // Skipping constant arguments - arg_types.push_back(arg.getType()); - } + if (const_args.find(arg.getName().str()) != const_args.end()) + { + continue; + } - // Creating a new function - llvm::FunctionType *function_type = llvm::FunctionType::get( - callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); - auto function = llvm::Function::Create(function_type, callee.getLinkage(), - callee.getAddressSpace(), callee.getName(), module); + arg_types.push_back(arg.getType()); + } - // Copying the non-const arguments - auto dest_args_it = function->arg_begin(); + // Creating a new function + llvm::FunctionType* function_type = llvm::FunctionType::get( + callee.getFunctionType()->getReturnType(), arg_types, callee.getFunctionType()->isVarArg()); + auto function = llvm::Function::Create( + function_type, callee.getLinkage(), callee.getAddressSpace(), callee.getName(), module); - for (auto const &arg : callee.args()) - { - auto const_it = const_args.find(arg.getName().str()); - if (const_it == const_args.end()) - { - // Mapping remaining function arguments - dest_args_it->setName(arg.getName()); - remapper[&arg] = &*dest_args_it++; - } - else - { - remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); - } - } + // Copying the non-const arguments + auto dest_args_it = function->arg_begin(); - llvm::SmallVector returns; // Ignore returns cloned. + for (auto const& arg : callee.args()) + { + auto const_it = const_args.find(arg.getName().str()); + if (const_it == const_args.end()) + { + // Mapping remaining function arguments + dest_args_it->setName(arg.getName()); + remapper[&arg] = &*dest_args_it++; + } + else + { + remapper[&arg] = llvm::ConstantInt::get(context, const_it->second->getValue()); + } + } - // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' - llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); + llvm::SmallVector returns; // Ignore returns cloned. - verifyFunction(*function); + // TODO(tfr): In LLVM 13 upgrade 'true' to 'llvm::CloneFunctionChangeType::LocalChangesOnly' + llvm::CloneFunctionInto(function, &callee, remapper, true, returns, "", nullptr); - return function; - } + verifyFunction(*function); - return nullptr; -} + return function; + } -bool ExpandStaticAllocationPass::isRequired() -{ - return true; -} + return nullptr; + } + + bool ExpandStaticAllocationPass::isRequired() + { + return true; + } -} // namespace quantum -} // namespace microsoft +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp index 8166737e27..fbee619be2 100644 --- a/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp +++ b/src/Passes/libs/ExpandStaticAllocation/ExpandStaticAllocation.hpp @@ -3,44 +3,49 @@ // Licensed under the MIT License. 
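Still on the reformatted pass above, and again only as an editorial aside: the template parameters of the casts and of the analysis lookup were lost in the patch text (`llvm::dyn_cast<llvm::CallInst>`, `llvm::dyn_cast<llvm::ConstantInt>`, `fam.getResult<QubitAllocationAnalysisAnalytics>`). A minimal sketch of the constant-argument collection performed by `run`, with the types spelled out, could read as follows; the helper name `collectConstantArguments` is invented here.

```cpp
// Illustrative sketch: collect the call operands that are integer constants,
// keyed by the callee's parameter names, as run() does before deciding whether
// a specialised callee should be created.
#include "Llvm.hpp"

#include <string>
#include <unordered_map>

using ConstantArguments = std::unordered_map<std::string, llvm::ConstantInt *>;

ConstantArguments collectConstantArguments(llvm::CallInst &call_instr)
{
    ConstantArguments argument_constants;

    auto *callee_function = call_instr.getCalledFunction();
    if (callee_function == nullptr)
    {
        return argument_constants;
    }

    for (unsigned idx = 0; idx < callee_function->arg_size(); ++idx)
    {
        auto *arg   = callee_function->getArg(idx);
        auto *value = call_instr.getArgOperand(idx);

        // Only integer constants can make a downstream qubit allocation statically sized.
        if (auto *cst = llvm::dyn_cast<llvm::ConstantInt>(value))
        {
            argument_constants[arg->getName().str()] = cst;
        }
    }

    return argument_constants;
}
```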
#include "Llvm.hpp" + #include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp" #include -namespace microsoft { -namespace quantum { - -class ExpandStaticAllocationPass : public llvm::PassInfoMixin +namespace microsoft { -public: - using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; - using ConstantArguments = std::unordered_map; - - /// Constructors and destructors - /// @{ - ExpandStaticAllocationPass() = default; - ExpandStaticAllocationPass(ExpandStaticAllocationPass const &) = default; - ExpandStaticAllocationPass(ExpandStaticAllocationPass &&) = default; - ~ExpandStaticAllocationPass() = default; - /// @} - - /// Operators - /// @{ - ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass const &) = default; - ExpandStaticAllocationPass &operator=(ExpandStaticAllocationPass &&) = default; - /// @} - - /// Functions required by LLVM - /// @{ - llvm::PreservedAnalyses run(llvm::Function &function, llvm::FunctionAnalysisManager &fam); - static bool isRequired(); - /// @} - - /// @{ - llvm::Function *ExpandFunctionCall(QubitAllocationResult const &depenency_graph, - llvm::Function &callee, ConstantArguments const &const_args); - /// @} -}; - -} // namespace quantum -} // namespace microsoft +namespace quantum +{ + + class ExpandStaticAllocationPass : public llvm::PassInfoMixin + { + public: + using QubitAllocationResult = QubitAllocationAnalysisAnalytics::Result; + using ConstantArguments = std::unordered_map; + + /// Constructors and destructors + /// @{ + ExpandStaticAllocationPass() = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass const&) = default; + ExpandStaticAllocationPass(ExpandStaticAllocationPass&&) = default; + ~ExpandStaticAllocationPass() = default; + /// @} + + /// Operators + /// @{ + ExpandStaticAllocationPass& operator=(ExpandStaticAllocationPass const&) = default; + ExpandStaticAllocationPass& operator=(ExpandStaticAllocationPass&&) = default; + /// @} + + /// Functions required by LLVM + /// @{ + llvm::PreservedAnalyses run(llvm::Function& function, llvm::FunctionAnalysisManager& fam); + static bool isRequired(); + /// @} + + /// @{ + llvm::Function* expandFunctionCall( + QubitAllocationResult const& depenency_graph, + llvm::Function& callee, + ConstantArguments const& const_args); + /// @} + }; + +} // namespace quantum +} // namespace microsoft diff --git a/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp b/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp index 2475aaf8cd..e73a64b7d8 100644 --- a/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp +++ b/src/Passes/libs/ExpandStaticAllocation/LibExpandStaticAllocation.cpp @@ -1,38 +1,42 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
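The `LibExpandStaticAllocation.cpp` changes that follow keep registering the pass with `opt` under the pipeline name `expand-static-allocation`. As a complement, a hedged sketch of driving the same pass in-process (for example from a test harness) is shown below; the wrapper name `runExpandStaticAllocation` and the default-constructibility of `QubitAllocationAnalysisAnalytics` are assumptions made for the example rather than guarantees of this patch.

```cpp
// Illustrative sketch: run ExpandStaticAllocationPass over every defined function
// of a module without going through opt. The analysis the pass queries has to be
// registered on the FunctionAnalysisManager explicitly.
#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp"
#include "QubitAllocationAnalysis/QubitAllocationAnalysis.hpp"

#include "Llvm.hpp"

void runExpandStaticAllocation(llvm::Module &module)
{
    llvm::FunctionAnalysisManager fam;
    llvm::PassBuilder              pass_builder;

    // run() asks the manager for QubitAllocationAnalysisAnalytics, so it is
    // registered next to LLVM's stock function analyses.
    fam.registerPass([] { return microsoft::quantum::QubitAllocationAnalysisAnalytics(); });
    pass_builder.registerFunctionAnalyses(fam);

    llvm::FunctionPassManager function_pass_manager;
    function_pass_manager.addPass(microsoft::quantum::ExpandStaticAllocationPass());

    for (auto &function : module)
    {
        if (!function.isDeclaration())
        {
            function_pass_manager.run(function, fam);
        }
    }
}
```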
-#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" - #include "Llvm.hpp" +#include "ExpandStaticAllocation/ExpandStaticAllocation.hpp" + #include #include -namespace { +namespace +{ llvm::PassPluginLibraryInfo getExpandStaticAllocationPluginInfo() { - using namespace microsoft::quantum; - using namespace llvm; - - return { - LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, [](PassBuilder &pb) { - // Registering the pass - pb.registerPipelineParsingCallback([](StringRef name, FunctionPassManager &fpm, - ArrayRef /*unused*/) { - if (name == "expand-static-allocation") - { - fpm.addPass(ExpandStaticAllocationPass()); - return true; - } + using namespace microsoft::quantum; + using namespace llvm; + + return { + LLVM_PLUGIN_API_VERSION, "ExpandStaticAllocation", LLVM_VERSION_STRING, + [](PassBuilder& pb) + { + // Registering the pass + pb.registerPipelineParsingCallback( + [](StringRef name, FunctionPassManager& fpm, ArrayRef /*unused*/) + { + if (name == "expand-static-allocation") + { + fpm.addPass(ExpandStaticAllocationPass()); + return true; + } - return false; - }); - }}; -} + return false; + }); + }}; +} } // namespace // Interface for loading the plugin extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { - return getExpandStaticAllocationPluginInfo(); + return getExpandStaticAllocationPluginInfo(); } From 76a534bf438f416b4e8e330bb36c423b440b5199 Mon Sep 17 00:00:00 2001 From: "Troels F. Roennow" Date: Fri, 30 Jul 2021 07:10:59 +0200 Subject: [PATCH 46/48] Removing leading % from command line snippets --- src/Passes/CONTRIBUTING.md | 2 +- src/Passes/README.md | 18 +- src/Passes/docs/continous-integration.md | 10 +- src/Passes/docs/library-structure.md | 2 +- .../examples/OptimisationUsingOpt/README.md | 6 +- .../QubitAllocationAnalysis/README.md | 26 +- .../analysis-example.ll | 438 ++++++++++++++++++ 7 files changed, 470 insertions(+), 32 deletions(-) create mode 100644 src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll diff --git a/src/Passes/CONTRIBUTING.md b/src/Passes/CONTRIBUTING.md index 0b4493bb8d..f44a11a6c5 100644 --- a/src/Passes/CONTRIBUTING.md +++ b/src/Passes/CONTRIBUTING.md @@ -59,7 +59,7 @@ Prefer `#pragma once` over `#ifdef` protection. ## Code TODOs must contain owner name or Github issue ```sh -% ./manage runci +./manage runci (...) QsPasses/src/OpsCounter/OpsCounter.cpp:39:21: error: missing username/bug in TODO [google-readability-todo,-warnings-as-errors] // TODO: Fails to load if this is present diff --git a/src/Passes/README.md b/src/Passes/README.md index 6419c757b0..c1b34e91a5 100644 --- a/src/Passes/README.md +++ b/src/Passes/README.md @@ -204,7 +204,7 @@ For a gentle introduction, see examples. To make it easy to create a new pass, we have created a few templates to get you started quickly: ```sh -% ./manage create-pass HelloWorld +./manage create-pass HelloWorld Available templates: 1. Function Pass @@ -215,9 +215,9 @@ Select a template:1 At the moment you only have one choice which is a function pass. Over time we will add additional templates. Once you have instantiated your template, you are ready to build it: ```sh -% mkdir Debug -% cd Debug -% cmake .. +mkdir Debug +cd Debug +cmake .. -- The C compiler identification is AppleClang 12.0.5.12050022 -- The CXX compiler identification is AppleClang 12.0.5.12050022 (...) @@ -225,7 +225,7 @@ At the moment you only have one choice which is a function pass. 
Over time we wi -- Generating done -- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug -% make +make [ 25%] Building CXX object libs/CMakeFiles/OpsCounter.dir/OpsCounter/OpsCounter.cpp.o [ 50%] Linking CXX shared library libOpsCounter.dylib @@ -240,9 +240,9 @@ template will not do much except for print the function names of your code. To t build an IR and run the pass: ```sh -% cd ../examples/ClassicalIrCommandline -% make -% opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll +cd ../examples/ClassicalIrCommandline +make +opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll ``` If everything worked, you should see output like this: @@ -303,7 +303,7 @@ that you use a docker image to perform these steps. TODO(TFR): The docker image One error that you may encounter is that an analysis pass does not load with output similar to this: ```sh -% opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc +opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc Failed to load passes from '../../Debug/libQSharpPasses.dylib'. Request ignored. opt: unknown pass name 'operation-counter' ``` diff --git a/src/Passes/docs/continous-integration.md b/src/Passes/docs/continous-integration.md index 30cc273ef5..364d230883 100644 --- a/src/Passes/docs/continous-integration.md +++ b/src/Passes/docs/continous-integration.md @@ -3,7 +3,7 @@ In order to run the tests, you first need to build the library. Assuming that this is already done and the corresponding build is in `Debug/`, run the tests from the `Debug` folder: ``` -% lit tests/ -v +lit tests/ -v -- Testing: 2 tests, 2 workers -- PASS: Quantum-Passes :: QubitAllocationAnalysis/case1.ll (1 of 2) PASS: Quantum-Passes :: QubitAllocationAnalysis/case2.ll (2 of 2) @@ -49,13 +49,13 @@ This part defines pipelines for `.hpp` files and `.cpp` files allowing the devel Each of these CI stages can executed individually using `./manage` or you can run the entire CI process by invoking `./manage runci`. An example of what this may look like is here: ```zsh -% ./manage runci +./manage runci 2021-07-21 14:38:04,896 - FormatChecker - ERROR - /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp was not correctly formatted. 2021-07-21 14:38:04,899 - FormatChecker - ERROR - Your code did not pass formatting. -% ./manage stylecheck --fix-issues -% ./manage runci +./manage stylecheck --fix-issues +./manage runci -- Found LLVM 11.1.0 -- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm @@ -78,7 +78,7 @@ Use -header-filter=.* to display errors from all non-system headers. 
Use -system 2021-07-21 14:38:40,191 - Linter - ERROR - /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/src/OpsCounter/OpsCounter.cpp failed static analysis # ISSUES FIXED MANUALLY -% ./manage runci +./manage runci -- Found LLVM 11.1.0 -- Using LLVMConfig.cmake in: /usr/local/opt/llvm@11/lib/cmake/llvm diff --git a/src/Passes/docs/library-structure.md b/src/Passes/docs/library-structure.md index d1977cb0e1..ae0c42238b 100644 --- a/src/Passes/docs/library-structure.md +++ b/src/Passes/docs/library-structure.md @@ -13,7 +13,7 @@ libs Adding a new pass is easy using the `manage` tool developed in this PR: ``` -% ./manage create-pass HelloWorld +./manage create-pass HelloWorld Available templates: 1. Function Pass diff --git a/src/Passes/examples/OptimisationUsingOpt/README.md b/src/Passes/examples/OptimisationUsingOpt/README.md index 7f84f1b2d5..03cd9db246 100644 --- a/src/Passes/examples/OptimisationUsingOpt/README.md +++ b/src/Passes/examples/OptimisationUsingOpt/README.md @@ -20,10 +20,10 @@ namespace Example { You find the code for this in the folder `SimpleExample`. To generate a QIR for this code, go to the folder and run ```sh -% cd SimpleExample/ -% dotnet clean SimpleExample.csproj +cd SimpleExample/ +dotnet clean SimpleExample.csproj (...) -% dotnet build SimpleExample.csproj -c Debug +dotnet build SimpleExample.csproj -c Debug ``` If everything went well, you should now have a subdirectory called `qir` and inside `qir`, you will find `SimpleExample.ll`. Depending on the version of Q#, diff --git a/src/Passes/examples/QubitAllocationAnalysis/README.md b/src/Passes/examples/QubitAllocationAnalysis/README.md index 383ee026ed..515b641ba4 100644 --- a/src/Passes/examples/QubitAllocationAnalysis/README.md +++ b/src/Passes/examples/QubitAllocationAnalysis/README.md @@ -11,7 +11,7 @@ The following depnds on: Running following command ```sh -% make run +make run ``` will first build the pass, then build the QIR using Q# following by removing the noise using `opt` with optimisation level 1. Finally, it will execute the analysis pass and should provide you with information about qubit allocation in the Q# program defined in `ConstSizeArray/ConstSizeArray.qs`. @@ -21,35 +21,35 @@ will first build the pass, then build the QIR using Q# following by removing the From the Passes root (two levels up from this directory), make a new build ```sh -% mkdir Debug -% cd Debug -% cmake .. +mkdir Debug +cd Debug +cmake .. 
``` and then compile the `QubitAllocationAnalysis`: ```sh -% make QubitAllocationAnalysis +make QubitAllocationAnalysis ``` Next return `examples/QubitAllocationAnalysis` and enter the directory `ConstSizeArray` to build the QIR: ```sh -% make analysis-example.ll +make analysis-example.ll ``` or execute the commands manually, ```sh -% dotnet build ConstSizeArray.csproj -% opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll -% make clean +dotnet build ConstSizeArray.csproj +opt -S qir/ConstSizeArray.ll -O1 > ../analysis-example.ll +make clean ``` Returning to `examples/QubitAllocationAnalysis`, the pass can now be ran by executing: ```sh -% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll ``` ## Example cases @@ -92,7 +92,7 @@ entry: Running the pass procudes following output: ``` -% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== @@ -152,7 +152,7 @@ entry: The analyser returns following output: ``` -% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== @@ -196,7 +196,7 @@ namespace Example { We will omit the QIR in the documenation as it is a long. The output of the anaysis is: ``` -% opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll +opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib --passes="print" -disable-output analysis-example.ll Example__QuantumProgram__body ==================== diff --git a/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll new file mode 100644 index 0000000000..6f6c98c8e0 --- /dev/null +++ b/src/Passes/examples/QubitAllocationAnalysis/analysis-example.ll @@ -0,0 +1,438 @@ +; ModuleID = 'qir/ConstSizeArray.ll' +source_filename = "qir/ConstSizeArray.ll" + +%Tuple = type opaque +%Qubit = type opaque +%Array = type opaque +%Result = type opaque +%Callable = type opaque +%String = type opaque + +@Microsoft__Quantum__Qir__Emission__M = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Qir__Emission__M__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@0 = internal constant [3 x i8] c", \00" +@1 = internal constant [2 x i8] c"[\00" +@2 = internal constant [2 x i8] c"]\00" + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) local_unnamed_addr + +declare void @__quantum__qis__cnot__adj(%Qubit*, %Qubit*) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare %Tuple* @__quantum__rt__tuple_create(i64) local_unnamed_addr + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) local_unnamed_addr + +define internal fastcc %Result* 
@Microsoft__Quantum__Qir__Emission__M__body(%Qubit* %q) unnamed_addr { +entry: + %0 = call %Result* @__quantum__qis__m__body(%Qubit* %q) + ret %Result* %0 +} + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %a, %Qubit* %b, %Qubit* %c) unnamed_addr { +entry: + call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %b) + call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %a) + call void @__quantum__qis__toffoli__body(%Qubit* %a, %Qubit* %b, %Qubit* %c) + ret void +} + +declare void @__quantum__qis__toffoli__body(%Qubit*, %Qubit*, %Qubit*) local_unnamed_addr + +define internal fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %a, %Qubit* %b, %Qubit* %c) unnamed_addr { +entry: + call void @__quantum__qis__toffoli__adj(%Qubit* %a, %Qubit* %b, %Qubit* %c) + call void @__quantum__qis__cnot__adj(%Qubit* %c, %Qubit* %a) + call void @__quantum__qis__cnot__adj(%Qubit* %c, %Qubit* %b) + ret void +} + +declare void @__quantum__qis__toffoli__adj(%Qubit*, %Qubit*, %Qubit*) local_unnamed_addr + +define internal fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() unnamed_addr { +entry: + %a = call %Array* @__quantum__rt__qubit_allocate_array(i64 4) + call void @__quantum__rt__array_update_alias_count(%Array* %a, i32 1) + %b = call %Array* @__quantum__rt__qubit_allocate_array(i64 4) + call void @__quantum__rt__array_update_alias_count(%Array* %b, i32 1) + %cin = call %Qubit* @__quantum__rt__qubit_allocate() + %cout = call %Qubit* @__quantum__rt__qubit_allocate() + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %q = load %Qubit*, %Qubit** %1, align 8 + call void @__quantum__qis__x__body(%Qubit* %q) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %b) + %3 = add i64 %2, -1 + %.not1 = icmp slt i64 %3, 0 + br i1 %.not1, label %exit__1, label %body__1 + +body__1: ; preds = %entry, %body__1 + %4 = phi i64 [ %7, %body__1 ], [ 0, %entry ] + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 %4) + %6 = bitcast i8* %5 to %Qubit** + %q__1 = load %Qubit*, %Qubit** %6, align 8 + call void @__quantum__qis__x__body(%Qubit* %q__1) + %7 = add i64 %4, 1 + %.not = icmp sgt i64 %7, %3 + br i1 %.not, label %exit__1, label %body__1 + +exit__1: ; preds = %body__1, %entry + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %cin, %Qubit* %10, %Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %16 = load %Qubit*, %Qubit** %15, align 8 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 1) + %18 = bitcast i8* %17 to %Qubit** + %19 = load %Qubit*, %Qubit** %18, align 8 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %16, %Qubit* %19, %Qubit* %22) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) + %24 = bitcast i8* %23 to %Qubit** + %25 = load 
%Qubit*, %Qubit** %24, align 8 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 2) + %27 = bitcast i8* %26 to %Qubit** + %28 = load %Qubit*, %Qubit** %27, align 8 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) + %30 = bitcast i8* %29 to %Qubit** + %31 = load %Qubit*, %Qubit** %30, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %25, %Qubit* %28, %Qubit* %31) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33, align 8 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 3) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36, align 8 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) + %39 = bitcast i8* %38 to %Qubit** + %40 = load %Qubit*, %Qubit** %39, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__body(%Qubit* %34, %Qubit* %37, %Qubit* %40) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) + %42 = bitcast i8* %41 to %Qubit** + %c = load %Qubit*, %Qubit** %42, align 8 + call void @__quantum__qis__cnot__body(%Qubit* %c, %Qubit* %cout) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) + %44 = bitcast i8* %43 to %Qubit** + %45 = load %Qubit*, %Qubit** %44, align 8 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 3) + %47 = bitcast i8* %46 to %Qubit** + %48 = load %Qubit*, %Qubit** %47, align 8 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 3) + %50 = bitcast i8* %49 to %Qubit** + %51 = load %Qubit*, %Qubit** %50, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %45, %Qubit* %48, %Qubit* %51) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) + %53 = bitcast i8* %52 to %Qubit** + %54 = load %Qubit*, %Qubit** %53, align 8 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 2) + %56 = bitcast i8* %55 to %Qubit** + %57 = load %Qubit*, %Qubit** %56, align 8 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 2) + %59 = bitcast i8* %58 to %Qubit** + %60 = load %Qubit*, %Qubit** %59, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %54, %Qubit* %57, %Qubit* %60) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %62 = bitcast i8* %61 to %Qubit** + %63 = load %Qubit*, %Qubit** %62, align 8 + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 1) + %65 = bitcast i8* %64 to %Qubit** + %66 = load %Qubit*, %Qubit** %65, align 8 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 1) + %68 = bitcast i8* %67 to %Qubit** + %69 = load %Qubit*, %Qubit** %68, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %63, %Qubit* %66, %Qubit* %69) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %b, i64 0) + %71 = bitcast i8* %70 to %Qubit** + %72 = load %Qubit*, %Qubit** %71, align 8 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %a, i64 0) + %74 = bitcast i8* %73 to %Qubit** + %75 = load %Qubit*, %Qubit** %74, align 8 + call fastcc void @Microsoft__Quantum__Qir__Emission__Majority__adj(%Qubit* %cin, %Qubit* %72, %Qubit* %75) + %76 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* nonnull 
@Microsoft__Quantum__Qir__Emission__M, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %77 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission___73da7dcac81a47ddabb1a0e30be3dfdb_ForEach__body(%Callable* %76, %Array* %b) + call void @__quantum__rt__array_update_alias_count(%Array* %b, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %a, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %76, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %76, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %cin) + call void @__quantum__rt__qubit_release(%Qubit* %cout) + call void @__quantum__rt__qubit_release_array(%Array* %b) + call void @__quantum__rt__qubit_release_array(%Array* %a) + ret %Array* %77 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr + +declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) local_unnamed_addr + +define internal fastcc %Array* @Microsoft__Quantum__Qir__Emission___73da7dcac81a47ddabb1a0e30be3dfdb_ForEach__body(%Callable* %action, %Array* %array) unnamed_addr { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 1) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %2 = add i64 %1, -1 + %.not9 = icmp slt i64 %2, 0 + br i1 %.not9, label %exit__1, label %body__1 + +body__1: ; preds = %entry, %exit__4 + %3 = phi i64 [ %32, %exit__4 ], [ 0, %entry ] + %res.010 = phi %Array* [ %14, %exit__4 ], [ %0, %entry ] + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3) + %5 = bitcast i8* %4 to %Qubit** + %item = load %Qubit*, %Qubit** %5, align 8 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Result** + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 8) + %10 = bitcast %Tuple* %9 to %Qubit** + store %Qubit* %item, %Qubit** %10, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 8) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %9, %Tuple* %11) + %12 = bitcast %Tuple* %11 to %Result** + %13 = load %Result*, %Result** %12, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + store %Result* %13, %Result** %8, align 8 + %14 = call %Array* @__quantum__rt__array_concatenate(%Array* %res.010, %Array* %6) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %16 = add i64 %15, -1 + %.not57 = icmp slt i64 %16, 0 + br i1 %.not57, label %exit__2, label %body__2 + +exit__1: ; preds = %exit__4, %entry + %res.0.lcssa = phi %Array* [ %0, %entry ], [ %14, %exit__4 ] + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %res.0.lcssa, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret %Array* %res.0.lcssa + +body__2: ; preds = %body__1, %body__2 + %17 = phi i64 [ %21, %body__2 ], [ 0, %body__1 ] + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %17) + %19 = bitcast i8* %18 to %Result** + %20 = load %Result*, %Result** %19, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %20, i32 1) + %21 = add i64 %17, 1 + %.not5 = icmp sgt i64 %21, %16 + br i1 %.not5, label %exit__2, label %body__2 + +exit__2: ; preds = %body__2, %body__1 + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %res.010, i32 -1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %23 = bitcast i8* %22 to %Result** + %24 = load %Result*, %Result** %23, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %res.010) + %26 = add i64 %25, -1 + %.not68 = icmp slt i64 %26, 0 + br i1 %.not68, label %exit__4, label %body__4 + +body__4: ; preds = %exit__2, %body__4 + %27 = phi i64 [ %31, %body__4 ], [ 0, %exit__2 ] + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %res.010, i64 %27) + %29 = bitcast i8* %28 to %Result** + %30 = load %Result*, %Result** %29, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %30, i32 -1) + %31 = add i64 %27, 1 + %.not6 = icmp sgt i64 %31, %26 + br i1 %.not6, label %exit__4, label %body__4 + +exit__4: ; preds = %body__4, %exit__2 + call void @__quantum__rt__array_update_reference_count(%Array* %res.010, i32 -1) + %32 = add i64 %3, 1 + %.not = icmp sgt i64 %32, %2 + br i1 %.not, label %exit__1, label %body__1 +} + +define internal void @Microsoft__Quantum__Qir__Emission__M__body__wrapper(%Tuple* nocapture readnone %capture-tuple, %Tuple* nocapture readonly %arg-tuple, %Tuple* nocapture %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to %Qubit** + %1 = load %Qubit*, %Qubit** %0, align 8 + %2 = call fastcc %Result* @Microsoft__Quantum__Qir__Emission__M__body(%Qubit* %1) + %3 = bitcast %Tuple* %result-tuple to %Result** + store %Result* %2, %Result** %3, align 8 + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) local_unnamed_addr + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) local_unnamed_addr + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) local_unnamed_addr + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) local_unnamed_addr + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) local_unnamed_addr + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) local_unnamed_addr + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) 
local_unnamed_addr + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) local_unnamed_addr + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) local_unnamed_addr + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +define { i64, i8* }* @Microsoft__Quantum__Qir__Emission__RunAdder__Interop() local_unnamed_addr #0 { +entry: + %0 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = call i8* @__quantum__rt__memory_allocate(i64 %1) + %3 = ptrtoint i8* %2 to i64 + %4 = add i64 %1, -1 + %.not5 = icmp slt i64 %4, 0 + br i1 %.not5, label %exit__1, label %body__1 + +body__1: ; preds = %entry, %body__1 + %5 = phi i64 [ %14, %body__1 ], [ 0, %entry ] + %6 = add i64 %5, %3 + %7 = inttoptr i64 %6 to i8* + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) + %9 = bitcast i8* %8 to %Result** + %10 = load %Result*, %Result** %9, align 8 + %11 = call %Result* @__quantum__rt__result_get_zero() + %12 = call i1 @__quantum__rt__result_equal(%Result* %10, %Result* %11) + %not. = xor i1 %12, true + %13 = sext i1 %not. to i8 + store i8 %13, i8* %7, align 1 + %14 = add i64 %5, 1 + %.not = icmp sgt i64 %14, %4 + br i1 %.not, label %exit__1, label %body__1 + +exit__1: ; preds = %body__1, %entry + %15 = call i8* @__quantum__rt__memory_allocate(i64 16) + %16 = bitcast i8* %15 to i64* + store i64 %1, i64* %16, align 4 + %17 = getelementptr i8, i8* %15, i64 8 + %18 = bitcast i8* %17 to i8** + store i8* %2, i8** %18, align 8 + %.not34 = icmp slt i64 %4, 0 + br i1 %.not34, label %exit__2, label %body__2 + +body__2: ; preds = %exit__1, %body__2 + %19 = phi i64 [ %23, %body__2 ], [ 0, %exit__1 ] + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %19) + %21 = bitcast i8* %20 to %Result** + %22 = load %Result*, %Result** %21, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %22, i32 -1) + %23 = add i64 %19, 1 + %.not3 = icmp sgt i64 %23, %4 + br i1 %.not3, label %exit__2, label %body__2 + +exit__2: ; preds = %body__2, %exit__1 + %24 = bitcast i8* %15 to { i64, i8* }* + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret { i64, i8* }* %24 +} + +declare i8* @__quantum__rt__memory_allocate(i64) local_unnamed_addr + +declare %Result* @__quantum__rt__result_get_zero() local_unnamed_addr + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr + +define void @Microsoft__Quantum__Qir__Emission__RunAdder() local_unnamed_addr #1 { +entry: + %0 = call fastcc %Array* @Microsoft__Quantum__Qir__Emission__RunAdder__body() + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i64 0, i64 0)) + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @1, i64 0, i64 0)) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %4 = add i64 %3, -1 + %.not7 = icmp slt i64 %4, 0 + br i1 %.not7, label %exit__1, label %body__1 + +body__1: ; preds = %entry, %condContinue__1 + %5 = phi i64 [ %14, %condContinue__1 ], [ 0, %entry ] + %6 = phi %String* [ %13, %condContinue__1 ], [ %2, %entry ] + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) + %8 = bitcast i8* 
%7 to %Result** + %9 = load %Result*, %Result** %8, align 8 + %.not5 = icmp eq %String* %6, %2 + br i1 %.not5, label %condContinue__1, label %condTrue__1 + +condTrue__1: ; preds = %body__1 + %10 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %1) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %body__1 + %11 = phi %String* [ %10, %condTrue__1 ], [ %6, %body__1 ] + %12 = call %String* @__quantum__rt__result_to_string(%Result* %9) + %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + %14 = add i64 %5, 1 + %.not = icmp sgt i64 %14, %4 + br i1 %.not, label %exit__1, label %body__1 + +exit__1: ; preds = %condContinue__1, %entry + %.lcssa = phi %String* [ %2, %entry ], [ %13, %condContinue__1 ] + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i64 0, i64 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %.lcssa, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %.lcssa, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__message(%String* %16) + %.not46 = icmp slt i64 %4, 0 + br i1 %.not46, label %exit__2, label %body__2 + +body__2: ; preds = %exit__1, %body__2 + %17 = phi i64 [ %21, %body__2 ], [ 0, %exit__1 ] + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %17) + %19 = bitcast i8* %18 to %Result** + %20 = load %Result*, %Result** %19, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %20, i32 -1) + %21 = add i64 %17, 1 + %.not4 = icmp sgt i64 %21, %4 + br i1 %.not4, label %exit__2, label %body__2 + +exit__2: ; preds = %body__2, %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare %String* @__quantum__rt__string_create(i8*) local_unnamed_addr + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) local_unnamed_addr + +declare %String* @__quantum__rt__result_to_string(%Result*) local_unnamed_addr + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } From ae42ad2d32e7968bf2c964547c952c3c9f0fa156 Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 30 Jul 2021 07:44:23 +0200 Subject: [PATCH 47/48] Fixing broken things from merge --- src/Passes/.clang-tidy.orig | 125 ------ src/Passes/CMakeLists.txt.orig | 52 --- src/Passes/CONTRIBUTING.md.orig | 97 ----- src/Passes/Makefile.orig | 10 - src/Passes/README.md.orig | 367 ------------------ src/Passes/docs/index.md.orig | 7 - .../ClassicalIrCommandline/Makefile.orig | 26 -- .../ClassicalIrCommandline/README.md.orig | 60 --- .../classical-program.c.orig | 21 - .../OptimisationUsingOpt/README.md.orig | 70 ---- .../SimpleExample/Makefile.orig | 17 - src/Passes/include/Llvm.hpp | 2 +- src/Passes/include/Llvm.hpp.orig | 62 --- src/Passes/libs/CMakeLists.txt.orig | 49 --- src/Passes/requirements.txt.orig | 5 - .../site-packages/TasksCI/builder.py.orig | 139 ------- src/Passes/site-packages/TasksCI/cli.py.orig | 282 -------------- .../site-packages/TasksCI/formatting.py.orig | 222 ----------- .../site-packages/TasksCI/linting.py.orig | 158 -------- .../site-packages/TasksCI/settings.py.orig | 21 - .../site-packages/TasksCI/toolchain.py.orig | 48 --- 21 files changed, 1 insertion(+), 1839 deletions(-) delete mode 100644 src/Passes/.clang-tidy.orig delete mode 100644 src/Passes/CMakeLists.txt.orig delete mode 100644 src/Passes/CONTRIBUTING.md.orig delete mode 100644 src/Passes/Makefile.orig delete mode 100644 src/Passes/README.md.orig delete mode 100644 src/Passes/docs/index.md.orig delete mode 100644 src/Passes/examples/ClassicalIrCommandline/Makefile.orig delete mode 100644 src/Passes/examples/ClassicalIrCommandline/README.md.orig delete mode 100644 src/Passes/examples/ClassicalIrCommandline/classical-program.c.orig delete mode 100644 src/Passes/examples/OptimisationUsingOpt/README.md.orig delete mode 100644 src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile.orig delete mode 100644 src/Passes/include/Llvm.hpp.orig delete mode 100644 src/Passes/libs/CMakeLists.txt.orig delete mode 100644 src/Passes/requirements.txt.orig delete mode 100644 src/Passes/site-packages/TasksCI/builder.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/cli.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/formatting.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/linting.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/settings.py.orig delete mode 100644 src/Passes/site-packages/TasksCI/toolchain.py.orig diff --git a/src/Passes/.clang-tidy.orig b/src/Passes/.clang-tidy.orig deleted file mode 100644 index 277cf2b186..0000000000 --- a/src/Passes/.clang-tidy.orig +++ /dev/null @@ -1,125 +0,0 @@ -Checks: "-*,bugprone-*,\ --readability-*,\ -readability-identifier-*,\ -readability-redundant-member-init,\ -readability-braces-around-statements,\ -cert-dcl*,\ -cert-env*,\ -cert-err52-cpp,\ -cert-err60-cpp,\ -cert-flp30-c,\ -clang-analyzer-security.FloatLoopCounter,\ -google-build-explicit-make-pair,\ -google-build-namespaces,\ -google-explicit-constructor,\ -google-readability-*,\ -google-runtime-operator,\ -hicpp-exception-baseclass,\ -hicpp-explicit-conversions,\ -hicpp-use-*,\ -modernize-avoid-bind,\ -modernize-loop-convert,\ -modernize-make-shared,\ -modernize-make-unique,\ -modernize-redundant-void-arg,\ -modernize-replace-random-shuffle,\ -modernize-shrink-to-fit,\ -modernize-use-bool-literals,\ -modernize-use-default-member-init,\ -modernize-use-emplace,\ -modernize-use-equals-default,\ -modernize-use-equals-delete,\ -modernize-use-noexcept,\ -modernize-use-nullptr,\ -modernize-use-override,\ -modernize-use-transparent-functors,\ -misc-*,\ 
--misc-misplaced-widening-cast,\ -performance-*" - -WarningsAsErrors: '*' -HeaderFilterRegex: '.*' - -CheckOptions: - # Configuration documentation: https://clang.llvm.org/extra/clang-tidy/checks/readability-identifier-naming.html - # Namespaces - - key: readability-identifier-naming.NamespaceCase - value: 'lower_case' - - # Classes and structs - - key: readability-identifier-naming.AbstractClassPrefix - value: 'I' - - key: readability-identifier-naming.ClassCase - value: 'CamelCase' - - key: readability-identifier-naming.StructCase - value: 'CamelCase' - - key: readability-identifier-naming.UnionCase - value: 'CamelCase' - - # Class members - - key: readability-identifier-naming.PrivateMemberCase - value: 'lower_case' - - key: readability-identifier-naming.PrivateMemberSuffix - value: '_' - - key: readability-identifier-naming.ProtectedMemberCase - value: 'lower_case' - - key: readability-identifier-naming.ProtectedMemberSuffix - value: '_' - -<<<<<<< HEAD - # Type Alias and Enum Types / constants -======= - # Alias ->>>>>>> features/llvm-passes - - key: readability-identifier-naming.TypeAliasCase - value: 'CamelCase' - - key: readability-identifier-naming.TypedefCase - value: 'CamelCase' -<<<<<<< HEAD - - key: readability-identifier-naming.EnumCase - value: 'CamelCase' - - key: readability-identifier-naming.EnumConstantCase - value: 'CamelCase' - - # Globals, consts and enums - - key: readability-identifier-naming.GlobalConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.GlobalConstantPrefix - value: 'G_' - - key: readability-identifier-naming.ConstantCase - value: 'UPPER_CASE' -======= ->>>>>>> features/llvm-passes - - # Functions - - key: readability-identifier-naming.FunctionCase - value: 'camelBack' - - key: readability-identifier-naming.IgnoreMainLikeFunctions - value: true - - # Variables and parameters - - key: readability-identifier-naming.VariableCase - value: 'lower_case' - - key: readability-identifier-naming.LocalVariableCase - value: 'lower_case' - - key: readability-identifier-naming.ParameterCase - value: 'lower_case' - -<<<<<<< HEAD -======= - # Globals, consts and enums - - key: readability-identifier-naming.GlobalConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.GlobalConstantPrefix - value: 'G_' - - key: readability-identifier-naming.ConstantCase - value: 'UPPER_CASE' - - key: readability-identifier-naming.EnumCase - value: 'CamelCase' - - key: readability-identifier-naming.EnumConstantCase - value: 'CamelCase' - ->>>>>>> features/llvm-passes - # Macros - - key: readability-identifier-naming.MacroDefinitionCase - value: 'UPPER_CASE' diff --git a/src/Passes/CMakeLists.txt.orig b/src/Passes/CMakeLists.txt.orig deleted file mode 100644 index cbe62ebbd8..0000000000 --- a/src/Passes/CMakeLists.txt.orig +++ /dev/null @@ -1,52 +0,0 @@ -cmake_minimum_required(VERSION 3.4.3) - -project(QSharpPasses) - -find_package(LLVM REQUIRED CONFIG) -include(CheckCXXCompilerFlag) - -message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") -message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") - -# Setting the standard configuration for the C++ compiler -# Rather than allowing C++17, we restrict ourselves to -# C++14 as this is the standard currently used by the LLVM -# project for compilation of the framework. While there is -# a very small chance that the difference in standard -# would break things, it is a possibility nonetheless. 
-set(CMAKE_CXX_STANDARD 14) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Weverything -Wconversion -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-padded -Wno-exit-time-destructors -Wno-global-constructors") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror ") - -# LLVM is normally built without RTTI. Be consistent with that. -if(NOT LLVM_ENABLE_RTTI) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") -endif() - -# -fvisibility-inlines-hidden is set when building LLVM and on Darwin warnings -# are triggered if llvm-tutor is built without this flag (though otherwise it -# builds fine). For consistency, add it here too. -check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG) -if (${SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG} EQUAL "1") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") -endif() - -# We export the compile commands which are needed by clang-tidy -# to run the static analysis -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Adding LLVM include directories. We may choose -# to move this to a module level at a later point -include_directories(${LLVM_INCLUDE_DIRS}) -link_directories(${LLVM_LIBRARY_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) -include_directories(${CMAKE_SOURCE_DIR}/src) - -# Adding the libraries -add_subdirectory(libs) -<<<<<<< HEAD -add_subdirectory(tests) -======= ->>>>>>> features/llvm-passes diff --git a/src/Passes/CONTRIBUTING.md.orig b/src/Passes/CONTRIBUTING.md.orig deleted file mode 100644 index 463e3f8d75..0000000000 --- a/src/Passes/CONTRIBUTING.md.orig +++ /dev/null @@ -1,97 +0,0 @@ -# Contributing (Proposal - WiP) - -This document is work in progress and nothing is set in stone. In case you do not want to feel like reading this style guide, just run - -```sh -./manage runci -``` - -<<<<<<< HEAD -from the `QsPasses` directory as all points defined in this document is automatically enforces. You can then refer to this guide for an explanation for why and how. -======= -from the `Passes` directory as all points defined in this document is automatically enforces. You can then refer to this guide for an explanation for why and how. ->>>>>>> features/llvm-passes - -## Why do we need a style guide? - -Consistency and readibility such that it is easy to read and understand code that was not written by yourself. For example, if one developer uses `CamelCase` for namespaces and `snake_case` for classes while another uses `snake_case` for namespaces and `CamelCase` you may end up with code sections that looks like this - -```cpp -int32_t main() -{ - name_space1::Class1 hello; - NameSpace2::class_name world; -} -``` - -which is hard to read. - -## What does the style guide apply to? - -<<<<<<< HEAD -The style guide applies to any new code written as well as code that is being refactored added to the `QsPasses` library. We do not rewrite existing code for the sake just changing the style. -======= -The style guide applies to any new code written as well as code that is being refactored added to the `Passes` library. We do not rewrite existing code for the sake just changing the style. ->>>>>>> features/llvm-passes - -## Style discrepency - -In case of a discrepency between this guideline and `clang-tidy` or `clang-format`, -clang tools rule. In case of discrency between this guide and any guides subsequently referenced guides, this guide rule. However, feel free to suggest changes. 
Changes will be incorporated on the basis -that updated styles are apply to new code and not existing code. - -## Naming - -Naming is taken from the [Microsoft AirSim](https://github.com/microsoft/AirSim/blob/master/docs/coding_guidelines.md) project. - -| **Code Element** | **Style** | **Comment** | -| --------------------- | -------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | -| Namespace | snake_case | Differentiates `namespace::ClassName` and `ClassName::SubClass` names | -| Class name | CamelCase | To differentiate from STL types which ISO recommends (do not use "C" or "T" prefixes) | -| Function name | camelCase | Lower case start is almost universal except for .Net world | -| Parameters/Locals | snake_case | Vast majority of standards recommends this because \_ is more readable to C++ crowd (although not much to Java/.Net crowd) | -| Member variables | snake_case_with\_ | The prefix \_ is heavily discouraged as ISO has rules around reserving \_identifiers, so we recommend suffix instead | -| Enums and its members | CamelCase | Most except very old standards agree with this one | -| Globals | g_snake_case | Avoid using globals whenever possible, but if you have to use `g_`. | -| Constants | UPPER_CASE | Very contentious and we just have to pick one here, unless if is a private constant in class or method, then use naming for Members or Locals | -| File names | Match case of class name in file | Lot of pro and cons either way but this removes inconsistency in auto generated code (important for ROS) | - -## Modernise when possible - -In general, modernise the code where possible. For instance, prefer `using` of `typedef`. - -## Header guards - -Prefer `#pragma once` over `#ifdef` protection. - -## Code TODOs must contain owner name or Github issue - -```sh -<<<<<<< HEAD -./manage runci -(...) -QsPasses/src/OpsCounter/OpsCounter.cpp:39:21: error: missing username/bug in TODO [google-readability-todo,-warnings-as-errors] -======= -% ./manage runci -(...) -Passes/src/OpsCounter/OpsCounter.cpp:39:21: error: missing username/bug in TODO [google-readability-todo,-warnings-as-errors] ->>>>>>> features/llvm-passes - // TODO: Fails to load if this is present - ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // TODO(tfr): Fails to load if this is present -``` - -## Always add copyrights - -Always add copyrights at the top of the file. - -```text -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. -``` - -For header files, prefer to put `#prama once` before the copyright. - -## Tabs vs. spaces - -Seriously, this should not even be a discussion: It does not matter. If you prefer one over the other feel free to write in whatever style you prefer as long as you use `clang-format` before making a PR. Again, the key here is consistency and readibility. diff --git a/src/Passes/Makefile.orig b/src/Passes/Makefile.orig deleted file mode 100644 index 746045c729..0000000000 --- a/src/Passes/Makefile.orig +++ /dev/null @@ -1,10 +0,0 @@ -nothing: - @echo "Preventing the user from accidently running the clean command." 
- -clean: - rm -rf Release/ - rm -rf Debug/ -<<<<<<< HEAD - -======= ->>>>>>> features/llvm-passes diff --git a/src/Passes/README.md.orig b/src/Passes/README.md.orig deleted file mode 100644 index fa21630a5e..0000000000 --- a/src/Passes/README.md.orig +++ /dev/null @@ -1,367 +0,0 @@ -<<<<<<< HEAD -# Q# Passes for LLVM - -This library defines [LLVM passes](https://llvm.org/docs/Passes.html) used for analysing, optimising and transforming the IR. The Q# pass library is a dynamic library that can be compiled and ran separately from the -======= -# QIR Passes for LLVM - -This library defines [LLVM passes](https://llvm.org/docs/Passes.html) used for analysing, optimising and transforming the IR. The QIR pass library is a dynamic library that can be compiled and ran separately from the ->>>>>>> features/llvm-passes -rest of the project code. While it is not clear whether this possible at the moment, we hope that it will be possible to write passes that enforce the [QIR specification](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR). - -## What do LLVM passes do? - -Before getting started, we here provide a few examples of classical use cases for [LLVM passes](https://llvm.org/docs/Passes.html). You find additional [instructive examples here][1]. - -**Example 1: Transformation**. As a first example of what [LLVM passes](https://llvm.org/docs/Passes.html) can do, we look at optimisation. Consider a compiler which -compiles - -```c -double test(double x) { - return (1+2+x)*(x+(1+2)); -} -``` - -into following IR: - -``` -define double @test(double %x) { -entry: - %addtmp = fadd double 3.000000e+00, %x - %addtmp1 = fadd double %x, 3.000000e+00 - %multmp = fmul double %addtmp, %addtmp1 - ret double %multmp -} -``` - -This code is obviously inefficient as we could get rid of one operation by rewritting the code to: - -```c -double test(double x) { - double y = 3+x; - return y * y; -} -``` - -One purpose of [LLVM passes](https://llvm.org/docs/Passes.html) is to allow automatic transformation from the above IR to the IR: - -``` -define double @test(double %x) { -entry: - %addtmp = fadd double %x, 3.000000e+00 - %multmp = fmul double %addtmp, %addtmp - ret double %multmp -} -``` - -**Example 2: Analytics**. Another example of useful passes are those generating and collecting statistics about the program. For instance, one analytics program -makes sense for classical programs is to count instructions used to implement functions. 
Take the C program: - -```c -int foo(int x) -{ - return x; -} - -void bar(int x, int y) -{ - foo(x + y); -} - -int main() -{ - foo(2); - bar(3, 2); - - return 0; -} -``` - -which produces follow IR (without optimisation): - -```language -define dso_local i32 @foo(i32 %0) #0 { - %2 = alloca i32, align 4 - store i32 %0, i32* %2, align 4 - %3 = load i32, i32* %2, align 4 - ret i32 %3 -} - -define dso_local void @bar(i32 %0, i32 %1) #0 { - %3 = alloca i32, align 4 - %4 = alloca i32, align 4 - store i32 %0, i32* %3, align 4 - store i32 %1, i32* %4, align 4 - %5 = load i32, i32* %3, align 4 - %6 = load i32, i32* %4, align 4 - %7 = add nsw i32 %5, %6 - %8 = call i32 @foo(i32 %7) - ret void -} - -define dso_local i32 @main() #0 { - %1 = alloca i32, align 4 - store i32 0, i32* %1, align 4 - %2 = call i32 @foo(i32 2) - call void @bar(i32 3, i32 2) - ret i32 0 -} -``` - -A stat pass for this code, would collect following statisics: - -```text -Stats for 'foo' -=========================== -Opcode # Used ---------------------------- -load 1 -ret 1 -alloca 1 -store 1 ---------------------------- - -Stats for 'bar' -=========================== -Opcode # Used ---------------------------- -load 2 -add 1 -ret 1 -alloca 2 -store 2 -call 1 ---------------------------- - -Stats for 'main' -=========================== -Opcode # Used ---------------------------- -ret 1 -alloca 1 -store 1 -call 2 ---------------------------- -``` - -**Example 3: Code validation**. A third use case is code validation. For example, one could write a pass to check whether bounds are exceeded on [static arrays][2]. -Note that this is a non-standard usecase as such analysis is usually made using the AST rather than at the IR level. - -**References** - -- [1] https://github.com/banach-space/llvm-tutor#analysis-vs-transformation-pass -- [2] https://github.com/victor-fdez/llvm-array-check-pass - -## Out-of-source Pass - -This library is build as set of out-of-source-passes. All this means is that we will not be downloading the LLVM repository and modifying this repository directly. You can read more [here](https://llvm.org/docs/CMake.html#cmake-out-of-source-pass). - -# Getting started - -## Dependencies - -This library is written in C++ and depends on: - -- LLVM - -Additional development dependencies include: - -- CMake -- clang-format -- clang-tidy - -## Building the passes - -To build the passes, create a new build directory and switch to that directory: - -```sh -mkdir Debug -cd Debug/ -``` - -To build the library, first configure CMake from the build directory - -```sh -cmake .. -``` - -and then make your target - -```sh -make [target] -``` - -<<<<<<< HEAD -The default target is `all`. Other valid targets are the name of the folders in `libs/` found in the passes root. -======= -Valid targets are the name of the folders in `libs/` found in the passes root. ->>>>>>> features/llvm-passes - -## Running a pass - -You can run a pass using [opt](https://llvm.org/docs/CommandGuide/opt.html) as follows: - -```sh -cd examples/ClassicalIrCommandline -make emit-llvm-bc -opt -load-pass-plugin ../../{Debug,Release}/libOpsCounter.{dylib,so} --passes="print" -disable-output classical-program.bc -``` - -<<<<<<< HEAD -For a gentle introduction, see examples. -======= -For a detailed tutorial, see examples. 
->>>>>>> features/llvm-passes - -## Creating a new pass - -To make it easy to create a new pass, we have created a few templates to get you started quickly: - -```sh -<<<<<<< HEAD -./manage create-pass HelloWorld -======= -% ./manage create-pass HelloWorld ->>>>>>> features/llvm-passes -Available templates: - -1. Function Pass - -Select a template:1 -``` - -At the moment you only have one choice which is a function pass. Over time we will add additional templates. Once you have instantiated your template, you are ready to build it: - -```sh -<<<<<<< HEAD -mkdir Debug -cd Debug -cmake .. -======= -% mkdir Debug -% cd Debug -% cmake .. ->>>>>>> features/llvm-passes --- The C compiler identification is AppleClang 12.0.5.12050022 --- The CXX compiler identification is AppleClang 12.0.5.12050022 -(...) --- Configuring done --- Generating done -<<<<<<< HEAD --- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/QsPasses/Debug - -make -======= --- Build files have been written to: ./qsharp-compiler/src/Passes/Debug - -% make ->>>>>>> features/llvm-passes - -[ 25%] Building CXX object libs/CMakeFiles/OpsCounter.dir/OpsCounter/OpsCounter.cpp.o -[ 50%] Linking CXX shared library libOpsCounter.dylib -[ 50%] Built target OpsCounter -[ 75%] Building CXX object libs/CMakeFiles/HelloWorld.dir/HelloWorld/HelloWorld.cpp.o -[100%] Linking CXX shared library libHelloWorld.dylib -[100%] Built target HelloWorld -``` - -Your new pass is ready to be implemented. Open `libs/HelloWorld/HelloWorld.cpp` to implement the details of the pass. At the moment, the -template will not do much except for print the function names of your code. To test your new pass go to the directory `examples/ClassicalIrCommandline`, -build an IR and run the pass: - -```sh -<<<<<<< HEAD -cd ../examples/ClassicalIrCommandline -make -opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll -======= -% cd ../examples/ClassicalIrCommandline -% make -% opt -load-pass-plugin ../../Debug/libs/libHelloWorld.{dylib,so} --passes="hello-world" -disable-output classical-program.ll ->>>>>>> features/llvm-passes -``` - -If everything worked, you should see output like this: - -```sh -Implement your pass here: foo -Implement your pass here: bar -Implement your pass here: main -``` - -## CI - -Before making a pull request with changes to this library, please ensure that style checks passes, that the code compiles, -unit test passes and that there are no erros found by the static analyser. - -To setup the CI environment, run following commands - -```sh -source develop.env -virtualenv develop__venv -source develop__venv/bin/activate -pip install -r requirements.txt -``` - -These adds the necessary environment variables to ensure that you have the `TasksCI` package and all required dependencies. - -To check the style, run - -```sh -./manage stylecheck -``` - -To test that the code compiles and tests passes run - -```sh -./manage test -``` - -Finally, to analyse the code, run - -```sh -./manage lint -``` - -You can run all processes by running: - -```sh -./manage runci -``` - -As `clang-tidy` and `clang-format` acts slightly different from version to version and on different platforms, it is recommended -that you use a docker image to perform these steps. TODO(TFR): The docker image is not added yet and this will be documented in the future. 
- -# Developer FAQ - -## Pass does not load - -One error that you may encounter is that an analysis pass does not load with output similar to this: - -```sh -<<<<<<< HEAD -opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc -======= -% opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -enable-debugify --passes="operation-counter" -disable-output classical-program.bc ->>>>>>> features/llvm-passes -Failed to load passes from '../../Debug/libQSharpPasses.dylib'. Request ignored. -opt: unknown pass name 'operation-counter' -``` - -This is likely becuase you have forgotten to instantiate static class members. For instance, in the case of an instance of `llvm::AnalysisInfoMixin` you are required to have static member `Key`: - -```cpp -class COpsCounterPass : public llvm::AnalysisInfoMixin { -private: - static llvm::AnalysisKey Key; //< REQUIRED by llvm registration - friend struct llvm::AnalysisInfoMixin; -}; -``` - -If you forget to instantiate this variable in your corresponding `.cpp` file, - -```cpp -// llvm::AnalysisKey COpsCounterPass::Key; //< Uncomment this line to make everything work -``` - -everything will compile, but the pass will fail to load. There will be no linking errors either. diff --git a/src/Passes/docs/index.md.orig b/src/Passes/docs/index.md.orig deleted file mode 100644 index 2fe6b79b93..0000000000 --- a/src/Passes/docs/index.md.orig +++ /dev/null @@ -1,7 +0,0 @@ -<<<<<<< HEAD -# Q# pass documentation -======= -# QIR pass documentation ->>>>>>> features/llvm-passes - -This directory and file is a placeholder for describing LLVM passes which was already implemented. diff --git a/src/Passes/examples/ClassicalIrCommandline/Makefile.orig b/src/Passes/examples/ClassicalIrCommandline/Makefile.orig deleted file mode 100644 index c19b98c8ca..0000000000 --- a/src/Passes/examples/ClassicalIrCommandline/Makefile.orig +++ /dev/null @@ -1,26 +0,0 @@ -<<<<<<< HEAD -emit-llvm-cpp: - clang -O3 -S -std=c++17 -emit-llvm classical-program.cpp -o classical-program.ll - -======= ->>>>>>> features/llvm-passes -emit-llvm: - clang -O0 -S -emit-llvm classical-program.c -o classical-program.ll - -emit-llvm-bc: - clang -O0 -c -emit-llvm classical-program.c -o classical-program.bc - - -debug-ng-pass-mac: emit-llvm-bc - opt -load-pass-plugin ../../Debug/libQSharpPasses.dylib -debug --passes="operation-counter" -disable-output classical-program.bc - - - -clean: - rm -f classical-program.ll -<<<<<<< HEAD - rm -f classical-program.bc -======= - rm -f classical-program.bc - ->>>>>>> features/llvm-passes diff --git a/src/Passes/examples/ClassicalIrCommandline/README.md.orig b/src/Passes/examples/ClassicalIrCommandline/README.md.orig deleted file mode 100644 index 4ee98bc634..0000000000 --- a/src/Passes/examples/ClassicalIrCommandline/README.md.orig +++ /dev/null @@ -1,60 +0,0 @@ -# Emitting classical IRs - -This example demonstrates how to emit a classical IR and run a custom -pass on it. The purpose of this example is to teach the user how to apply -a pass to a IR using commandline tools only. - -IRs can be represented either by a human readible language or through bytecode. 
For -C programs former is generated by - -```sh -<<<<<<< HEAD - clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll -``` - -where as the latter is generated writing: - -```sh - clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc -======= -% clang -O1 -S -emit-llvm classical-program.c -o classical-program.ll -``` - -whereas the latter is generated by executing: - -```sh -% clang -O1 -c -emit-llvm classical-program.c -o classical-program.bc ->>>>>>> features/llvm-passes -``` - -This generates a nice and short IR which makes not too overwhelming to understand what is going on. - -## Legacy passes - -<<<<<<< HEAD -This part assumes that you have build the QsPasses library. - -```sh -opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-counter -analyze classical-program.ll -======= -This part assumes that you have built the Passes library. - -```sh -% opt -load ../../{Debug,Release}/libQSharpPasses.{dylib,so} -legacy-operation-counter -analyze classical-program.ll ->>>>>>> features/llvm-passes -``` - -## Next-gen passes - -<<<<<<< HEAD -This part assumes that you have build the QsPasses library. - -```sh -opt -load-pass-plugin ../../{Debug,Release}/libs/libQSharpPasses.{dylib,so} --passes="print" -disable-output classical-program.bc -======= -This part assumes that you have built the Passes library. - -```sh -% opt -load-pass-plugin ../../{Debug,Release}/libQSharpPasses.{dylib,so} --passes="operation-counter" -disable-output classical-program.bc ->>>>>>> features/llvm-passes -``` diff --git a/src/Passes/examples/ClassicalIrCommandline/classical-program.c.orig b/src/Passes/examples/ClassicalIrCommandline/classical-program.c.orig deleted file mode 100644 index 70e6777170..0000000000 --- a/src/Passes/examples/ClassicalIrCommandline/classical-program.c.orig +++ /dev/null @@ -1,21 +0,0 @@ -int foo(int x) -{ - return x; -} - -void bar(int x, int y) -{ - foo(x + y); -} - -int main() -{ - foo(2); - bar(3, 2); - - return 0; -<<<<<<< HEAD -} -======= -} ->>>>>>> features/llvm-passes diff --git a/src/Passes/examples/OptimisationUsingOpt/README.md.orig b/src/Passes/examples/OptimisationUsingOpt/README.md.orig deleted file mode 100644 index ea94699bf1..0000000000 --- a/src/Passes/examples/OptimisationUsingOpt/README.md.orig +++ /dev/null @@ -1,70 +0,0 @@ -# Optimisation Using Opt - -In this document, we give a brief introduction on how to perform IR optimisations -using `opt`. - -## Stripping dead code - -We start out by considering a simple case of a program that just returns 0: - -```qsharp -namespace Example { - @EntryPoint() - operation OurAwesomeQuantumProgram(nQubits : Int) : Int { - - return 0; - } -} -``` - -You find the code for this in the folder `SimpleExample`. To generate a QIR for this code, go to the folder and run - -```sh -<<<<<<< HEAD -cd SimpleExample/ -dotnet clean SimpleExample.csproj -(...) -dotnet build SimpleExample.csproj -c Debug -``` - -If everything went well, you should now have a subdirectory called `qir` and inside `qir`, you will find `SimpleExample.ll`. Depending on the version of Q#, -======= -% cd SimpleExample/ -% dotnet clean SimpleExample.csproj -(...) -% dotnet build SimpleExample.csproj -c Debug -``` - -If everything went well, you should now have a subdirectory called `qir` and inside `qir`, you will find `SimpleExample.ll`. Depending on your compiler, ->>>>>>> features/llvm-passes -the generated QIR will vary, but in general, it will be relatively long. 
Looking at this file, you will see -that the total length is a little above 2000 lines of code. That is pretty extensive for a program which essentially -does nothing so obviously, most of the generated QIR must be dead code. We can now use `opt` to get rid of the dead code and we do this by invoking: - -```sh -opt -S qir/SimpleExample.ll -O3 > qir/SimpleExample-O3.ll -``` - -All going well, this should reduce your QIR to - -```language -; Function Attrs: norecurse nounwind readnone willreturn -define i64 @Example__QuantumFunction__Interop(i64 %nQubits) local_unnamed_addr #0 { -entry: - ret i64 0 -} - -define void @Example__QuantumFunction(i64 %nQubits) local_unnamed_addr #1 { -entry: - %0 = tail call %String* @__quantum__rt__int_to_string(i64 0) - tail call void @__quantum__rt__message(%String* %0) - tail call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} -``` - -<<<<<<< HEAD -plus a few extra delcarations. -======= -with a few additional declarations. ->>>>>>> features/llvm-passes diff --git a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile.orig b/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile.orig deleted file mode 100644 index 390ae68736..0000000000 --- a/src/Passes/examples/OptimisationUsingOpt/SimpleExample/Makefile.orig +++ /dev/null @@ -1,17 +0,0 @@ -<<<<<<< HEAD -======= -all: qir/SimpleExample.ll - -qir/SimpleExample.ll: - dotnet build SimpleExample.csproj -c Debug - ->>>>>>> features/llvm-passes -clean: - rm -rf bin - rm -rf obj - rm -rf qir -<<<<<<< HEAD - -======= - ->>>>>>> features/llvm-passes diff --git a/src/Passes/include/Llvm.hpp b/src/Passes/include/Llvm.hpp index 1f54a2ae33..80a4728b83 100644 --- a/src/Passes/include/Llvm.hpp +++ b/src/Passes/include/Llvm.hpp @@ -33,7 +33,7 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" -q + // Building #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constants.h" diff --git a/src/Passes/include/Llvm.hpp.orig b/src/Passes/include/Llvm.hpp.orig deleted file mode 100644 index 54378660c7..0000000000 --- a/src/Passes/include/Llvm.hpp.orig +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#pragma GCC diagnostic ignored "-Wpedantic" -#pragma GCC diagnostic ignored "-Wunused-value" -#pragma GCC diagnostic ignored "-Wsign-compare" -#pragma GCC diagnostic ignored "-Wunknown-warning-option" -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Wall" -#pragma GCC diagnostic ignored "-Weverything" -#endif - -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wconversion" -#pragma clang diagnostic ignored "-Wpedantic" -#pragma clang diagnostic ignored "-Werror" -#pragma clang diagnostic ignored "-Wshadow" -#pragma clang diagnostic ignored "-Wreturn-std-move" -#pragma clang diagnostic ignored "-Wunknown-warning-option" -#pragma clang diagnostic ignored "-Wunused-parameter" -#pragma clang diagnostic ignored "-Wall" -#pragma clang diagnostic ignored "-Weverything" -#endif - -<<<<<<< HEAD -// Passes -#include "llvm/Passes/PassBuilder.h" -#include "llvm/Passes/PassPlugin.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Utils/Cloning.h" - -// Building -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/Constants.h" -#include "llvm/IR/DerivedTypes.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/IRBuilder.h" -#include "llvm/IR/LLVMContext.h" -#include "llvm/IR/LegacyPassManager.h" -#include "llvm/IR/Module.h" -#include "llvm/IR/Type.h" -#include "llvm/IR/Verifier.h" -======= -#include "llvm/IR/LegacyPassManager.h" -#include "llvm/Passes/PassBuilder.h" -#include "llvm/Passes/PassPlugin.h" -#include "llvm/Support/raw_ostream.h" ->>>>>>> features/llvm-passes - -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif diff --git a/src/Passes/libs/CMakeLists.txt.orig b/src/Passes/libs/CMakeLists.txt.orig deleted file mode 100644 index 0feef87119..0000000000 --- a/src/Passes/libs/CMakeLists.txt.orig +++ /dev/null @@ -1,49 +0,0 @@ - -macro(list_qs_passes result) - file(GLOB children RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/*) - set(dirlist "") - foreach(child ${children}) - if(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${child}) - list(APPEND dirlist ${child}) - endif() - endforeach() - set(${result} ${dirlist}) -endmacro() - -list_qs_passes(ALL_PASSES) - -foreach(pass_plugin ${ALL_PASSES}) - - # Getting sources - file(GLOB sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/${pass_plugin}/*.cpp) - - # Adding library - add_library(${pass_plugin} - SHARED - ${sources}) - - # Adding include directories - target_include_directories( - ${pass_plugin} - PRIVATE - "${CMAKE_CURRENT_SOURCE_DIR}" - ) - - target_include_directories( - ${pass_plugin} - PRIVATE - "${CMAKE_CURRENT_SOURCE_DIR}/../include" - ) - - - # Linking - target_link_libraries(${pass_plugin} - "$<$:-undefined dynamic_lookup>") - -endforeach() -<<<<<<< HEAD - - -# add_library(passes SHARED ${ALL_PASSES}) -======= ->>>>>>> features/llvm-passes diff --git a/src/Passes/requirements.txt.orig b/src/Passes/requirements.txt.orig deleted file mode 100644 index f605709872..0000000000 --- a/src/Passes/requirements.txt.orig +++ /dev/null @@ -1,5 +0,0 @@ -click==8.0.1 -<<<<<<< HEAD -lit==12.0.1 -======= ->>>>>>> features/llvm-passes diff --git a/src/Passes/site-packages/TasksCI/builder.py.orig b/src/Passes/site-packages/TasksCI/builder.py.orig deleted file mode 100644 index 2ad3fc8aca..0000000000 
--- a/src/Passes/site-packages/TasksCI/builder.py.orig +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import os -from . import settings -from . import toolchain -from .settings import PROJECT_ROOT -import logging -import subprocess -import sys -<<<<<<< HEAD -======= -from typing import Union - -OptionalInt = Union[int, None] -OptionalStr = Union[str, None] - ->>>>>>> features/llvm-passes - -logger = logging.getLogger() - - -<<<<<<< HEAD -def configure_cmake(build_dir: str, generator=None): -======= -def configure_cmake(build_dir: str, generator=None) -> None: ->>>>>>> features/llvm-passes - """ - Function that creates a build directory and runs - cmake to configure make, ninja or another generator. - """ - - logger.info("Source: {}".format(PROJECT_ROOT)) - logger.info("Build : {}".format(build_dir)) - - os.chdir(PROJECT_ROOT) - os.makedirs(build_dir, exist_ok=True) - - cmake_cmd = [toolchain.discover_cmake()] - - if generator is not None: - cmake_cmd += ['-G', generator] - - cmake_cmd += [PROJECT_ROOT] - - exit_code = subprocess.call(cmake_cmd, cwd=build_dir) - if exit_code != 0: - logger.error('Failed to configure project') - sys.exit(exit_code) - - -<<<<<<< HEAD -def build_project(build_dir: str, generator=None, concurrency=None): -======= -def build_project(build_dir: str, generator: OptionalStr = None, concurrency: OptionalInt = None) -> None: ->>>>>>> features/llvm-passes - """ - Given a build directory, this function builds all targets using - a specified generator and concurrency. - """ - - if generator in ["make", None]: - cmd = ["make"] - elif generator in ["ninja"]: - cmd = ["ninja"] - - if concurrency is None: -<<<<<<< HEAD - concurrency = settings.get_concurrency() -======= - concurrency = settings.get_degree_of_concurrency() ->>>>>>> features/llvm-passes - - cmd.append('-j{}'.format(concurrency)) - - exit_code = subprocess.call(cmd, cwd=build_dir) - - if exit_code != 0: - logger.error('Failed to make the project') - sys.exit(exit_code) - - -<<<<<<< HEAD -def run_tests(build_dir: str, concurrency=None): - """ - Runs the unit tests given a build directory. - """ - fail = False - - # Running lit tests - lit_cmd = ["lit", "tests/", "-v"] - exit_code = subprocess.call(lit_cmd, cwd=build_dir) - - if exit_code != 0: - logger.error('Lit test failed') - fail = True - - # Running CMake tests -======= -def run_tests(build_dir: str, concurrency: OptionalInt = None) -> None: - """ - Runs the unit tests given a build directory. - """ - ->>>>>>> features/llvm-passes - cmake_cmd = [toolchain.discover_ctest()] - - if concurrency is not None: - raise BaseException("No support for concurrent testing at the moment.") - - exit_code = subprocess.call(cmake_cmd, cwd=build_dir) - if exit_code != 0: -<<<<<<< HEAD - logger.error('CTest failed project') - fail = True - - if fail: - sys.exit(exit_code) - - -def main(build_dir: str, generator=None, test: bool = False): -======= - logger.error('Failed to configure project') - sys.exit(exit_code) - - -def main(build_dir: str, generator: OptionalStr = None, test: bool = False) -> None: ->>>>>>> features/llvm-passes - """ - Runs the entire build process by first configuring, the building - and optionally testing the codebase. 
- """ - - configure_cmake(build_dir, generator) - - build_project(build_dir, generator) - - if test: - run_tests(build_dir) diff --git a/src/Passes/site-packages/TasksCI/cli.py.orig b/src/Passes/site-packages/TasksCI/cli.py.orig deleted file mode 100644 index 82c051d2f7..0000000000 --- a/src/Passes/site-packages/TasksCI/cli.py.orig +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .formatting import main as style_check_main -from .builder import main as builder_main -from .linting import main as lint_main, clang_tidy_diagnose - -import click -import logging -import sys -import os -import re -<<<<<<< HEAD -======= -from typing import Union - -OptionalInt = Union[int, None] -OptionalStr = Union[str, None] ->>>>>>> features/llvm-passes - -# Important directories -LIB_DIR = os.path.abspath(os.path.dirname((__file__))) -TEMPLATE_DIR = os.path.join(LIB_DIR, "templates") -SOURCE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(LIB_DIR))) - -# Logging configuration -logger = logging.getLogger() -ch = logging.StreamHandler() -ch.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -ch.setFormatter(formatter) -logger.addHandler(ch) - -# By default we only log errors -logger.setLevel(logging.ERROR) - - -@click.group() -@click.option('--loglevel', default="error") -<<<<<<< HEAD -def cli(loglevel): -======= -def cli(loglevel: str) -> None: ->>>>>>> features/llvm-passes - """ - Implements the general CLI options such as logging level. - """ - - # Valid values - levels = { - "critical": 50, - "error": 40, - "warning": 30, - "info": 20, - "debug": 10, - "notset": 0 - } - - # Getting the logging level and updating - loglevel = loglevel.lower() - if loglevel not in levels: - logger.critical("Invalid log level") - sys.exit(-1) - - logger.setLevel(levels[loglevel]) - logger.info("Loglevel set to {}".format(loglevel)) - - -@cli.command() -@click.option('--fix-issues', default=False, is_flag=True) -<<<<<<< HEAD -def stylecheck(fix_issues): -======= -def stylecheck(fix_issues: bool) -> None: ->>>>>>> features/llvm-passes - """ - Command for checking the style and optionally fixing issues. - Note that some issues are not automatically fixed. - """ - - logger.info("Invoking style checker") - style_check_main(fix_issues) - - -@cli.command() -@click.option("--diagnose", default=False, is_flag=True) -@click.option('--fix-issues', default=False, is_flag=True) -@click.option('--force', default=False, is_flag=True) -<<<<<<< HEAD -def lint(diagnose, fix_issues, force): -======= -def lint(diagnose: bool, fix_issues: bool, force: bool) -> None: ->>>>>>> features/llvm-passes - """ - Command for linting the code. - """ - - # Helpful option in order to diagnose Clang tidy. - if diagnose: - clang_tidy_diagnose() - - # In case we are diagnosing, no run is performed. - return - - # Allowing Clang tidy to attempt to fix issues. Generally, - # it is discouraged to use this features as it may result in - # a catastrophy - if fix_issues: - if not force: - print("""Fixing isssues using Clang Tidy will break your code. -Make sure that you have committed your changes BEFORE DOING THIS. -Even so, this feature is experimental and there have been reports of -clang-tidy modying system libraries - therefore, USE THIS FEATURE AT -YOUR OWN RISK. 
- -Write 'I understand' to proceed.""") - print(":") - x = input() - if x.lower() != "i understand": - print("Wrong answer - stopping!") - exit(-1) - - # Running the linter - logger.info("Invoking linter") - lint_main(fix_issues) - - -@cli.command() -@click.option('--debug/--no-debug', default=True) -@click.option('--generator', default=None) -<<<<<<< HEAD -def test(debug, generator): -======= -def test(debug: bool, generator: OptionalStr) -> None: ->>>>>>> features/llvm-passes - """ - Command to build and test the code base. - """ - - logger.info("Building and testing") - - build_dir = "Debug" - if not debug: - build_dir = "Release" - - builder_main(build_dir, generator, True) - - -@cli.command() -<<<<<<< HEAD -def runci(): -======= -def runci() -> None: ->>>>>>> features/llvm-passes - """ - Command to run all CI commands, starting with style check - then linting and finally unit tests. - """ - - build_dir = "Debug" - - style_check_main(False) - lint_main(False) - builder_main(build_dir, None, True) - - -@cli.command() -@click.argument( - "name" -) -@click.option( - "--template", - default=None, -) -<<<<<<< HEAD -def create_pass(name, template): -======= -def create_pass(name: str, template: OptionalStr) -> None: ->>>>>>> features/llvm-passes - """ - Helper command to create a new pass from a template. Templates - can be found in the template directory of the TasksCI tool. - """ - - # Checking whether the target already exists - target_dir = os.path.join(SOURCE_DIR, "libs", name) - if os.path.exists(target_dir): - logger.error("Pass '{}' already exists".format(name)) - exit(-1) - - # In case no template was specified, we list the option - # such that the user can choose one - if template is None: - - # Listing options - options = [] - print("Available templates:") - print("") - for template_name in os.listdir(TEMPLATE_DIR): - if os.path.isdir(os.path.join(TEMPLATE_DIR, template_name)): - options.append(template_name) - - # Printing option - pretty_template_name = re.sub(r'(? len(options) + 1: - try: - n = input("Select a template:") -======= - print("Type 'q' or 'quit' to abort.") - print("") - while n < 1 or n > len(options) + 1: - try: - n = input("Select a template:") - - if n == "q" or n == "quit": - logger.info("User aborted.") - exit(0) - ->>>>>>> features/llvm-passes - n = int(n) - except: # noqa: E722 - logger.error("Invalid choice") - exit(-1) - - # Getting the template - template = options[n - 1] - - # Checking that the template is valid. Note that even though - # we list the templates above, the user may have specified an - # invalid template via the command line. - template_dir = os.path.join(TEMPLATE_DIR, template) - if not os.path.exists(template_dir): - logger.error("Template does not exist") - exit(-1) - - # Creating an operation name by transforming the original name - # from "CamelCase" to "camel-case" - operation_name = re.sub(r'(?>>>>>> features/llvm-passes - -logger = logging.getLogger("FormatChecker") -CLANG_FORMAT_EXE = discover_formatter() - - -<<<<<<< HEAD -def require_token(token, filename, contents, cursor, fix_issues): -======= -def require_token(token: str, filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function to require that the next part of the document is a specific token. 
- """ - failed = False - if not contents[cursor:].startswith(token): - logger.error("{}: File must have {} at position {}".format(filename, token, cursor)) - failed = True - return cursor + len(token), failed - - -<<<<<<< HEAD -def require_pragma_once(filename, contents, cursor, fix_issues): -======= -def require_pragma_once(filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function that requires '#pragma once' in headers - """ - return require_token("#pragma once\n", filename, contents, cursor, fix_issues) - - -<<<<<<< HEAD -def enforce_cpp_license(filename, contents, cursor, fix_issues): -======= -def enforce_cpp_license(filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function that requires copyrights in C++ files - """ - return require_token("""// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -""", filename, contents, cursor, fix_issues) - - -<<<<<<< HEAD -def enforce_py_license(filename, contents, cursor, fix_issues): -======= -def enforce_py_license(filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function that requires copyrights in Python files - """ - # Allowing empty files - if contents.strip() == "": - return cursor, False - - return require_token("""# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -""", filename, contents, cursor, fix_issues) - - -<<<<<<< HEAD -def enforce_formatting(filename, contents, cursor, fix_issues): -======= -def enforce_formatting(filename: str, contents: str, cursor: int, fix_issues: bool) -> int: ->>>>>>> features/llvm-passes - """ - Validator function that tests whether the style of the C++ - source file follows that dictated by `.clang-format`. - """ - - # Opening a pipe with Clang format - p = subprocess.Popen( - [CLANG_FORMAT_EXE, '-style=file'], - stdout=subprocess.PIPE, - stdin=subprocess.PIPE, - cwd=PROJECT_ROOT) - - # Passing the contents of the file - output = p.communicate(input=contents.encode())[0] - - # In case something went wrong, we raise an exception - if p.returncode != 0: - raise Exception('Could not format contents') - - # Otherwise we check that the input is the same as the output - formatted = output.decode('utf-8') - if formatted != contents: - - # Updating the contents of the file if requested by the user - if fix_issues: - logger.info("Formatting {}".format(filename)) - with open(filename, "w") as filebuffer: - filebuffer.write(formatted) - return cursor, False - - logger.error("{} was not correctly formatted.".format(filename)) - return cursor, True - - return cursor, False - - -# Source pipeline definitions. These instructs the next part of -# the code on how to validate each source file. 
-<<<<<<< HEAD -======= - ->>>>>>> features/llvm-passes -SOURCE_PIPELINES = [ - { - "name": "C++ Main", - "src": path.join(PROJECT_ROOT, "libs"), - - "pipelines": { - "hpp": [ - require_pragma_once, - enforce_cpp_license, - enforce_formatting - ], - "cpp": [ - enforce_cpp_license, - enforce_formatting - ] - } - }, - { - "name": "Scripts", - "src": path.join(PROJECT_ROOT, "site-packages"), - - "pipelines": { - "py": [ - enforce_py_license, - ], - } - } -] - - -<<<<<<< HEAD -def execute_pipeline(pipeline, filename: str, fix_issues: bool): -======= -def execute_pipeline(pipeline: IPipeline, filename: str, fix_issues: bool) -> bool: ->>>>>>> features/llvm-passes - """ - Helper function to execute a pipeline for a specific file - """ - logger.info("Executing pipeline for {}".format(filename)) - cursor = 0 - - # Reading the file - with open(filename, "r") as fb: - contents = fb.read() - - # Executing each step of the pipeline - failed = False - for fnc in pipeline: - cursor, f = fnc(filename, contents, cursor, fix_issues) - failed = failed or f - - return failed - - -<<<<<<< HEAD -def main(fix_issues: bool = False): -======= -def main(fix_issues: bool = False) -> None: ->>>>>>> features/llvm-passes - """ - This function runs a pipeline for every file that - matches the description given in SOURCE_PIPELINES. - """ - failed = False - - # Iterating through every definition - for language in SOURCE_PIPELINES: - - logger.info("Formatting {}".format(language["name"])) - basedir = language["src"] - pipelines = language["pipelines"] - - # Finding all files whose location matches that of the - # definition - for root, dirs, files in os.walk(basedir): - - for filename in files: - if "." not in filename: - continue - - # Executing the pipeline if appropriate - _, ext = filename.rsplit(".", 1) - if ext in pipelines: - f = execute_pipeline(pipelines[ext], path.join(root, filename), fix_issues) - failed = failed or f - - if failed: - logger.error("Your code did not pass formatting.") - sys.exit(-1) - - -if __name__ == "__main__": - main() diff --git a/src/Passes/site-packages/TasksCI/linting.py.orig b/src/Passes/site-packages/TasksCI/linting.py.orig deleted file mode 100644 index 7145fc7b0f..0000000000 --- a/src/Passes/site-packages/TasksCI/linting.py.orig +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import logging -from .builder import configure_cmake, build_project -from . 
import toolchain -from .settings import PROJECT_ROOT -import os -import subprocess -import sys -<<<<<<< HEAD -======= -from typing import Union - -OptionalInt = Union[int, None] -OptionalStr = Union[str, None] ->>>>>>> features/llvm-passes - -logger = logging.getLogger("Linter") - - -<<<<<<< HEAD -def clang_tidy_diagnose(): -======= -def clang_tidy_diagnose() -> None: ->>>>>>> features/llvm-passes - """ - Helper function to print the configuration of Clang tidy - """ - - # Getting the config - config = subprocess.check_output( - [toolchain.discover_tidy(), '-dump-config'], cwd=PROJECT_ROOT).decode() - - # Getting the list of checks - check_list = subprocess.check_output( - [toolchain.discover_tidy(), '-list-checks'], cwd=PROJECT_ROOT).decode() - - # Printing it all to the user - checks = [x.strip() for x in check_list.split("\n") if '-' in x] - - print("Working directory: {}".format(PROJECT_ROOT)) - print("") - print(config) - print("") - print("Clang tidy checks:") - - for check in sorted(checks): - print(" -", check) - - -<<<<<<< HEAD -def run_clang_tidy(build_dir, filename, fix_issues: bool = False): -======= -def run_clang_tidy(build_dir: str, filename: str, fix_issues: bool = False) -> bool: ->>>>>>> features/llvm-passes - """ - Function that runs Clang tidy for a single file given a build directory - and a filename. - """ - - # Configuring the command line arguments - clang_tidy_binary = toolchain.discover_tidy() - - cmd = [clang_tidy_binary] - output_file = os.path.abspath(os.path.join(build_dir, 'clang_tidy_fixes.yaml')) - - cmd.append('-header-filter=".*\\/(Passes)\\/(libs)\\/.*"') - cmd.append('-p=' + build_dir) - cmd.append('-export-fixes={}'.format(output_file)) - cmd.append('--use-color') - - if fix_issues: - cmd.append("-fix") - - cmd.append(filename) - - logger.info("Running '{}'".format(" ".join(cmd))) - - # Getting the output - p = subprocess.Popen( - " ".join(cmd), - stdout=subprocess.PIPE, - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - cwd=PROJECT_ROOT, - shell=True) - - output, err = p.communicate() - - output = output.decode() - err = err.decode() - - if p.returncode != 0: - - # The return value is negative even if the user code is without - # errors, so we check whether there are any errors specified in - # error output - if "error" in err: - sys.stderr.write(output) - sys.stderr.write(err) - - logger.error("{} failed static analysis".format(filename)) - return False - - logger.info("All good!") - return True - - -<<<<<<< HEAD -def main_cpp(fix_issues: bool): -======= -def main_cpp(fix_issues: bool) -> bool: ->>>>>>> features/llvm-passes - """ - Main function for C++ linting. This function builds and lints - the code. - """ - - logger.info("Linting") - build_dir = os.path.join(PROJECT_ROOT, "Debug") - source_dir = os.path.join(PROJECT_ROOT, "libs") - generator = None - extensions = ["cpp"] - - # Configuring CMake - configure_cmake(build_dir, generator) - - # Building - build_project(build_dir, generator) - - # Generating list of files - # TODO(TFR): Ensure that it is only those which were changed that are - # analysed - files_to_analyse = [] - - for root, dirs, files in os.walk(source_dir): - for filename in files: - if "." 
not in filename: - continue - - _, ext = filename.rsplit(".", 1) - if ext in extensions: - files_to_analyse.append(os.path.join(root, filename)) - - success = True - for filename in files_to_analyse: - success = success and run_clang_tidy(build_dir, filename, fix_issues=fix_issues) - return success - - -<<<<<<< HEAD -def main(fix_issues: bool): -======= -def main(fix_issues: bool) -> None: ->>>>>>> features/llvm-passes - if not main_cpp(fix_issues): - sys.exit(-1) diff --git a/src/Passes/site-packages/TasksCI/settings.py.orig b/src/Passes/site-packages/TasksCI/settings.py.orig deleted file mode 100644 index 62f2f39331..0000000000 --- a/src/Passes/site-packages/TasksCI/settings.py.orig +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from os import path -import multiprocessing - -PROJECT_ROOT = path.abspath(path.dirname(path.dirname(path.dirname(__file__)))) - -MAX_CONCURRENCY = 7 - - -<<<<<<< HEAD -def get_concurrency(): -======= -def get_degree_of_concurrency() -> int: ->>>>>>> features/llvm-passes - """ - Function that gives a default concurrency for the compilation - and testing process. - """ - return min(MAX_CONCURRENCY, multiprocessing.cpu_count()) diff --git a/src/Passes/site-packages/TasksCI/toolchain.py.orig b/src/Passes/site-packages/TasksCI/toolchain.py.orig deleted file mode 100644 index ddd96c6e27..0000000000 --- a/src/Passes/site-packages/TasksCI/toolchain.py.orig +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import shutil - - -<<<<<<< HEAD -def discover_formatter(): -======= -def discover_formatter() -> str: ->>>>>>> features/llvm-passes - """ - Finds the clang-format executable - """ - return shutil.which("clang-format") - - -<<<<<<< HEAD -def discover_tidy(): -======= -def discover_tidy() -> str: ->>>>>>> features/llvm-passes - """ - Finds the clang-tidy executable - """ - return shutil.which("clang-tidy") - - -<<<<<<< HEAD -def discover_cmake(): -======= -def discover_cmake() -> str: ->>>>>>> features/llvm-passes - """ - Finds the cmake executable - """ - return shutil.which("cmake") - - -<<<<<<< HEAD -def discover_ctest(): -======= -def discover_ctest() -> str: ->>>>>>> features/llvm-passes - """ - Finds the ctest executable - """ - return shutil.which("ctest") From c64bf691410cc56c8c8ebb395b2a959cb39f601e Mon Sep 17 00:00:00 2001 From: "Troels F. 
Roennow" Date: Fri, 30 Jul 2021 07:49:31 +0200 Subject: [PATCH 48/48] Removing garbage --- .../examples/QubitAllocationAnalysis/test.txt | 199 ------------------ 1 file changed, 199 deletions(-) delete mode 100644 src/Passes/examples/QubitAllocationAnalysis/test.txt diff --git a/src/Passes/examples/QubitAllocationAnalysis/test.txt b/src/Passes/examples/QubitAllocationAnalysis/test.txt deleted file mode 100644 index 133f3304be..0000000000 --- a/src/Passes/examples/QubitAllocationAnalysis/test.txt +++ /dev/null @@ -1,199 +0,0 @@ -pushd ../../ && mkdir -p Debug && cd Debug && cmake ..&& popd || popd -~/Documents/Projects/qsharp-compiler/src/Passes ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis --- Found LLVM 12.0.1 --- Using LLVMConfig.cmake in: /usr/local/opt/llvm/lib/cmake/llvm --- Configuring done --- Generating done --- Build files have been written to: /Users/tfr/Documents/Projects/qsharp-compiler/src/Passes/Debug -~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -pushd ../../Debug && make QubitAllocationAnalysis && popd || popd -~/Documents/Projects/qsharp-compiler/src/Passes/Debug ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -Consolidate compiler generated dependencies of target QubitAllocationAnalysis -[100%] Built target QubitAllocationAnalysis -~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -pushd ../../Debug && make ExpandStaticAllocation && popd || popd -~/Documents/Projects/qsharp-compiler/src/Passes/Debug ~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -Consolidate compiler generated dependencies of target ExpandStaticAllocation -[100%] Built target ExpandStaticAllocation -~/Documents/Projects/qsharp-compiler/src/Passes/examples/QubitAllocationAnalysis -opt -load-pass-plugin ../../Debug/libs/libQubitAllocationAnalysis.dylib \ - -load-pass-plugin ../../Debug/libs/libExpandStaticAllocation.dylib --passes="expand-static-allocation" -S analysis-example.ll -; ModuleID = 'analysis-example.ll' -source_filename = "qir/ConstSizeArray.ll" - -%Array = type opaque -%String = type opaque - -define internal fastcc void @Example__Main__body() unnamed_addr { -entry: - call fastcc void @Example__QuantumProgram__body(i64 3, i64 2, i64 1) - call fastcc void @Example__QuantumProgram__body(i64 4, i64 9, i64 4) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body(i64 %x, i64 %h, i64 %g) unnamed_addr { -entry: - %.neg = xor i64 %x, -1 - %.neg1 = mul i64 %.neg, %x - %z.neg = add i64 %.neg1, 47 - %y = mul i64 %x, 3 - %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) - %0 = add i64 %y, -2 - %1 = lshr i64 %0, 1 - %2 = add i64 %z.neg, %1 - %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) - %3 = sub i64 %y, %g - %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) - %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %h) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) - %4 = call fastcc i64 @Example__X__body(i64 %x) - %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) - br label %header__1 - 
-header__1: ; preds = %header__1, %entry - %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] - %.not = icmp sgt i64 %idxIteration, %g - %5 = add i64 %idxIteration, 1 - br i1 %.not, label %exit__1, label %header__1 - -exit__1: ; preds = %header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits0) - ret void -} - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr - -declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr - -; Function Attrs: norecurse nounwind readnone willreturn -define internal fastcc i64 @Example__X__body(i64 %value) unnamed_addr #0 { -entry: - %0 = mul i64 %value, 3 - ret i64 %0 -} - -declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr - -define i64 @Example__Main__Interop() local_unnamed_addr #1 { -entry: - call fastcc void @Example__Main__body() - ret i64 0 -} - -define void @Example__Main() local_unnamed_addr #2 { -entry: - call fastcc void @Example__Main__body() - %0 = call %String* @__quantum__rt__int_to_string(i64 0) - call void @__quantum__rt__message(%String* %0) - call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) - ret void -} - -declare void @__quantum__rt__message(%String*) local_unnamed_addr - -declare %String* @__quantum__rt__int_to_string(i64) local_unnamed_addr - -define internal fastcc void @Example__QuantumProgram__body.1() unnamed_addr { -entry: - %.neg = xor i64 3, -1 - %.neg1 = mul i64 %.neg, 3 - %z.neg = add i64 %.neg1, 47 - %y = mul i64 3, 3 - %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) - %0 = add i64 %y, -2 - %1 = lshr i64 %0, 1 - %2 = add i64 %z.neg, %1 - %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) - %3 = sub i64 %y, 1 - %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) - %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) - %4 = call fastcc i64 @Example__X__body(i64 3) - %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) - br label %header__1 - -header__1: ; preds = %header__1, %entry - %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] - %.not = icmp sgt i64 %idxIteration, 1 - %5 = add i64 %idxIteration, 1 - br i1 %.not, label %exit__1, label %header__1 - -exit__1: ; preds = %header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) - call void 
@__quantum__rt__qubit_release_array(%Array* %qubits4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits0) - ret void -} - -define internal fastcc void @Example__QuantumProgram__body.2() unnamed_addr { -entry: - %.neg = xor i64 4, -1 - %.neg1 = mul i64 %.neg, 4 - %z.neg = add i64 %.neg1, 47 - %y = mul i64 4, 3 - %qubits0 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 1) - %0 = add i64 %y, -2 - %1 = lshr i64 %0, 1 - %2 = add i64 %z.neg, %1 - %qubits1 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 1) - %3 = sub i64 %y, 4 - %qubits2 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 1) - %qubits3 = call %Array* @__quantum__rt__qubit_allocate_array(i64 9) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 1) - %4 = call fastcc i64 @Example__X__body(i64 4) - %qubits4 = call %Array* @__quantum__rt__qubit_allocate_array(i64 %4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 1) - br label %header__1 - -header__1: ; preds = %header__1, %entry - %idxIteration = phi i64 [ 0, %entry ], [ %5, %header__1 ] - %.not = icmp sgt i64 %idxIteration, 4 - %5 = add i64 %idxIteration, 1 - br i1 %.not, label %exit__1, label %header__1 - -exit__1: ; preds = %header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits4, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits4) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits3, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits3) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits2, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits2) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits1, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits0, i32 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qubits0) - ret void -} - -attributes #0 = { norecurse nounwind readnone willreturn } -attributes #1 = { "InteropFriendly" } -attributes #2 = { "EntryPoint" }