diff --git a/.gitattributes b/.gitattributes
index dfe192803..6b7f505a2 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -99,10 +99,8 @@
*.tagx text
*.xml text
-# These files shouldn't be taken into account for statistics
-# since they are from external libraries
-src/main/javascript/polyfills/** linguist-vendored
-src/test/resources/jasmine/** linguist-vendored
+# These generated reports shouldn't be taken into account for language statistics
+results/** linguist-vendored
# These files are binary and should be left untouched
# (binary is a macro for -text -diff)
diff --git a/.github/workflows/code-structure-analysis.yml b/.github/workflows/java-code-analysis.yml
similarity index 92%
rename from .github/workflows/code-structure-analysis.yml
rename to .github/workflows/java-code-analysis.yml
index 815bf5ffb..81b8645e2 100644
--- a/.github/workflows/code-structure-analysis.yml
+++ b/.github/workflows/java-code-analysis.yml
@@ -1,4 +1,4 @@
-name: Code Structure Analysis
+name: Java Code Structure Graph Analysis
on:
push:
@@ -50,6 +50,8 @@ jobs:
env:
CI_COMMIT_MESSAGE: Automated code structure analysis reports (CI)
CI_COMMIT_AUTHOR: ${{ github.event.repository.name }} Continuous Integration
+ PROJECT_NAME: AxonFramework
+ # Version variable name matches renovate.json configuration entry
AXON_FRAMEWORK_VERSION: 4.9.3
steps:
@@ -111,16 +113,16 @@ jobs:
key:
${{ runner.os }}-${{ hashFiles('**/*.sh') }}
- - name: Download AxonFramework artifacts
+ - name: Download ${{ env.PROJECT_NAME }}-${{ env.AXON_FRAMEWORK_VERSION }}
working-directory: temp
run: |
- mkdir -p AxonFramework-${{ env.AXON_FRAMEWORK_VERSION }}
- cd AxonFramework-${{ env.AXON_FRAMEWORK_VERSION }}
+ mkdir -p ${{ env.PROJECT_NAME }}-${{ env.AXON_FRAMEWORK_VERSION }}
+ cd ${{ env.PROJECT_NAME }}-${{ env.AXON_FRAMEWORK_VERSION }}
echo "Working directory: $( pwd -P )"
./../../scripts/downloader/downloadAxonFramework.sh ${{ env.AXON_FRAMEWORK_VERSION }}
- - name: Analyze AxonFramework
- working-directory: temp/AxonFramework-${{ env.AXON_FRAMEWORK_VERSION }}
+ - name: Analyze ${{ env.PROJECT_NAME }}-${{ env.AXON_FRAMEWORK_VERSION }}
+ working-directory: temp/${{ env.PROJECT_NAME }}-${{ env.AXON_FRAMEWORK_VERSION }}
# Shell type can be skipped if jupyter notebook reports (and therefore conda) aren't needed
shell: bash -el {0}
env:
diff --git a/.github/workflows/typescript-code-analysis.yml b/.github/workflows/typescript-code-analysis.yml
new file mode 100644
index 000000000..53e92a988
--- /dev/null
+++ b/.github/workflows/typescript-code-analysis.yml
@@ -0,0 +1,185 @@
+name: Typescript Code Structure Graph Analysis
+
+on:
+ push:
+ branches:
+ - main
+ # Ignore changes in documentation, general configuration and reports for push events
+ paths-ignore:
+ - 'results/**'
+ - '**/*.md'
+ - '**/*.txt'
+ - '**/*.css'
+ - '**/*.html'
+ - '**/*.js'
+ - '.gitignore'
+ - '.gitattributes'
+ - 'renovate.json'
+ - 'changelogTemplate.mustache'
+ - '**.code-workspace'
+ pull_request:
+ branches:
+ - main
+ # Ignore changes in documentation, general configuration and reports for pull request events
+ paths-ignore:
+ - 'results/**'
+ - '**/*.md'
+ - '**/*.txt'
+ - '**/*.css'
+ - '**/*.html'
+ - '**/*.js'
+ - '.gitignore'
+ - '.gitattributes'
+ - 'renovate.json'
+ - 'changelogTemplate.mustache'
+ - '**.code-workspace'
+
+# Requires the secret NEO4J_INITIAL_PASSWORD to be configured
+jobs:
+ reports:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - os: ubuntu-latest
+ java: 17
+ python: 3.11
+ mambaforge: 24.3.0-0
+ node: 18
+
+ env:
+ CI_COMMIT_MESSAGE: Automated code structure analysis reports (CI)
+ CI_COMMIT_AUTHOR: ${{ github.event.repository.name }} Continuous Integration
+ PROJECT_NAME: react-router
+ # Version variable name matches renovate.json configuration entry
+ REACT_ROUTER_VERSION: 6.22.0
+
+ steps:
+ - name: Checkout GIT Repository
+ uses: actions/checkout@v4
+ with:
+ token: ${{ secrets.WORKFLOW_GIT_ACCESS_TOKEN }}
+
+ - name: Setup Java JDK ${{ matrix.java }}
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'adopt'
+ java-version: ${{ matrix.java }}
+
+ - name: Setup node.js ${{ matrix.node }} for Graph Visualization
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ matrix.node }}
+
+ - name: Install node packages for Graph Visualization
+ working-directory: graph-visualization
+ run: npm ci
+
+ - name: Setup Cache for Conda package manager Mambaforge
+ uses: actions/cache@v4
+ env:
+ # Increase this value to reset the cache if ./jupyter/environment.yml has not changed
+ # Reference: https://github.com/conda-incubator/setup-miniconda#caching
+ CACHE_NUMBER: 0
+ with:
+ path: ~/conda_pkgs_dir
+ key:
+ ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-environments-${{hashFiles('**/environment.yml', '.github/workflows/*.yml') }}
+
+ # "Setup Python" can be skipped if jupyter notebook reports aren't needed
+ - name: Setup Python ${{ matrix.python }} with Conda package manager Mambaforge
+ uses: conda-incubator/setup-miniconda@v3
+ with:
+ python-version: ${{ matrix.python }}
+ miniforge-variant: Mambaforge
+ miniforge-version: ${{ matrix.mambaforge }}
+ use-mamba: true
+ activate-environment: codegraph
+ environment-file: ./jupyter/environment.yml
+ auto-activate-base: false
+ use-only-tar-bz2: true # IMPORTANT: This needs to be set for caching to work properly!
+
+ - name: Conda environment info
+ shell: bash -el {0}
+ run: conda info
+
+ - name: Setup temp directory if missing
+ run: mkdir -p ./temp
+
+ - name: Setup Cache for "temp/downloads" folder
+ uses: actions/cache@v4
+ with:
+ path: ./temp/downloads
+ key:
+ ${{ runner.os }}-${{ hashFiles('**/*.sh') }}
+
+ - name: Download ${{ env.PROJECT_NAME }}-${{ env.REACT_ROUTER_VERSION }}
+ working-directory: temp
+ run: |
+ mkdir -p ${{ env.PROJECT_NAME }}-${{ env.REACT_ROUTER_VERSION }}
+ cd ${{ env.PROJECT_NAME }}-${{ env.REACT_ROUTER_VERSION }}
+ echo "Working directory: $( pwd -P )"
+ ./../../scripts/downloader/downloadReactRouter.sh ${{ env.REACT_ROUTER_VERSION }}
+
+ - name: Analyze ${{ env.PROJECT_NAME }}-${{ env.REACT_ROUTER_VERSION }}
+ working-directory: temp/${{ env.PROJECT_NAME }}-${{ env.REACT_ROUTER_VERSION }}
+ # Shell type can be skipped if jupyter notebook reports (and therefore conda) aren't needed
+ shell: bash -el {0}
+ env:
+ NEO4J_INITIAL_PASSWORD: ${{ secrets.NEO4J_INITIAL_PASSWORD }}
+ ENABLE_JUPYTER_NOTEBOOK_PDF_GENERATION: "true"
+ run: |
+ ./../../scripts/analysis/analyze.sh
+
+ - name: Move reports from the temp directory to the results directory preserving their surrounding directory structure
+ working-directory: temp
+ run: ./../scripts/copyReportsIntoResults.sh
+
+ # Upload logs and unfinished reports in case of an error for troubleshooting
+ - name: Archive failed run with logs and unfinished results
+ if: failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: code-analysis-logs-java-${{ matrix.java }}-python-${{ matrix.python }}-mambaforge-${{ matrix.mambaforge }}
+ path: |
+ ./temp/**/runtime/*
+ ./temp/**/reports/*
+ retention-days: 5
+
+ # Upload successful results in case they are needed for troubleshooting
+ - name: Archive successful results
+ if: success()
+ uses: actions/upload-artifact@v4
+ with:
+ name: code-report-results-java-${{ matrix.java }}-python-${{ matrix.python }}-mambaforge-${{ matrix.mambaforge }}
+ path: ./results
+ if-no-files-found: error
+ retention-days: 5
+
+ # Upload Database Export
+ # Only possible after an export with "./../../scripts/analysis/analyze.sh --report DatabaseCsvExport"
+ # Won't be done here because of performance and security concerns
+ #- name: Archive exported database
+ # uses: actions/upload-artifact@v3
+ # with:
+ # name: code-report-database-export-${{ matrix.java }}-python-${{ matrix.python }}-mambaforge-${{ matrix.mambaforge }}
+ # path: ./temp/**/import
+ # if-no-files-found: error
+ # retention-days: 5
+
+ # Commit and push the generated analysis report results
+ - name: Display environment variable "github.event_name"
+ run: echo "github.event_name=${{ github.event_name }}"
+ - name: Commit "results" directory containing the reports
+ # Only run when a pull request gets merged or a commit is pushed to the main branch
+ # git add parameters need to match paths-ignore parameters above
+ # Git pull before add/commit/push to reduce race conditions on parallel builds
+ if: github.event_name == 'push'
+ run: |
+ git config --global user.name '${{ env.CI_COMMIT_AUTHOR }}'
+ git config --global user.email 'joht@users.noreply.github.com'
+ git config --local http.postBuffer 524288000
+ git pull
+ git add results
+ git commit -m "${{ env.CI_COMMIT_MESSAGE }}"
+ git push
diff --git a/COMMANDS.md b/COMMANDS.md
index 7f2ee91e0..23b032401 100644
--- a/COMMANDS.md
+++ b/COMMANDS.md
@@ -9,7 +9,7 @@ To run all analysis steps simple execute the following command:
```
π See [scripts/examples/analyzeAxonFramework.sh](./scripts/examples/analyzeAxonFramework.sh) as an example script that combines all the above steps.
-π See [Code Structure Analysis Pipeline](./.github/workflows/code-structure-analysis.yml) on how to do this within a GitHub Actions Workflow.
+π See [Code Structure Analysis Pipeline](./.github/workflows/java-code-analysis.yml) on how to do this within a GitHub Actions Workflow.
### Command Line Options
@@ -87,7 +87,7 @@ Change into the [scripts](./scripts/) directory e.g. with `cd scripts` and then
Change into the [results](./results/) directory e.g. with `cd results` and then execute the script [generateCsvReportReference.sh](./scripts/documentation/generateCsvReportReference.sh) with the following command:
π**Note:** This script is automatically triggered at the end of [copyReportsIntoResults.sh](./scripts/copyReportsIntoResults.sh)
-which is included in the pipeline [code-structure-analysis.yml](.github/workflows/code-structure-analysis.yml) and doesn't need to be executed manually normally.
+which is included in the pipeline [java-code-analysis.yml](.github/workflows/java-code-analysis.yml) and normally doesn't need to be executed manually.
```script
./../scripts/documentation/generateCsvReportReference.sh
@@ -98,7 +98,7 @@ which is included in the pipeline [code-structure-analysis.yml](.github/workflow
Change into the [results](./results/) directory e.g. with `cd results` and then execute the script [generateJupyterReportReference.sh](./scripts/documentation/generateJupyterReportReference.sh) with the following command:
π**Note:** This script is automatically triggered at the end of [copyReportsIntoResults.sh](./scripts/copyReportsIntoResults.sh)
-which is included in the pipeline [code-structure-analysis.yml](.github/workflows/code-structure-analysis.yml) and doesn't need to be executed manually normally.
+which is included in the pipeline [java-code-analysis.yml](.github/workflows/java-code-analysis.yml) and normally doesn't need to be executed manually.
```script
./../scripts/documentation/generateJupyterReportReference.sh
@@ -109,7 +109,7 @@ which is included in the pipeline [code-structure-analysis.yml](.github/workflow
Change into the [results](./results/) directory e.g. with `cd results` and then execute the script [generateImageReference.sh](./scripts/documentation/generateImageReference.sh) with the following command:
π**Note:** This script is automatically triggered at the end of [copyReportsIntoResults.sh](./scripts/copyReportsIntoResults.sh)
-which is included in the pipeline [code-structure-analysis.yml](.github/workflows/code-structure-analysis.yml) and doesn't need to be executed manually normally.
+which is included in the pipeline [java-code-analysis.yml](.github/workflows/java-code-analysis.yml) and normally doesn't need to be executed manually.
```script
./../scripts/documentation/generateImageReference.sh
diff --git a/GETTING_STARTED.md b/GETTING_STARTED.md
index e47195b0b..f66fcb409 100644
--- a/GETTING_STARTED.md
+++ b/GETTING_STARTED.md
@@ -81,4 +81,4 @@ Please read through the [Prerequisites](./README.md#π -prerequisites) in the [
Then open your browser and login to your [local Neo4j Web UI](http://localhost:7474/browser) with "neo4j" as user and the initial password you've chosen.
π See [scripts/examples/analyzeAxonFramework.sh](./scripts/examples/analyzeAxonFramework.sh) as an example script that combines all the above steps.
-π See [Code Structure Analysis Pipeline](./.github/workflows/code-structure-analysis.yml) on how to do this within a GitHub Actions Workflow.
\ No newline at end of file
+π See [Code Structure Analysis Pipeline](./.github/workflows/java-code-analysis.yml) on how to do this within a GitHub Actions Workflow.
\ No newline at end of file
diff --git a/README.md b/README.md
index 2da889ce3..27f087351 100644
--- a/README.md
+++ b/README.md
@@ -2,14 +2,17 @@
-Contained within this repository is a comprehensive and automated code graph analysis pipeline. While initially designed to support Java through the utilization of [jQAssistant](https://jqassistant.org/get-started), it is open to extension for further programming languages. The graph database [Neo4j](https://neo4j.com) serves as the foundation for storing and querying the graph, which encompasses all the structural intricacies of the analyzed code. Additionally, Neo4j's [Graph Data Science](https://neo4j.com/product/graph-data-science) provides additional algorithms like community detection to analyze the code structure. The generated reports offer flexibility, ranging from simple query results presented as CSV files to more elaborate Jupyter Notebooks converted to Markdown or PDF formats.
+This repository contains a comprehensive and automated code graph analysis pipeline. While initially designed to support Java using [jQAssistant](https://jqassistant.org/get-started), it now also [supports Typescript](https://github.com/jqassistant-plugin/jqassistant-typescript-plugin) and is open to extension for further programming languages. The graph database [Neo4j](https://neo4j.com) serves as the foundation for storing and querying the graph, which encompasses all the structural intricacies of the analyzed code. Additionally, Neo4j's [Graph Data Science](https://neo4j.com/product/graph-data-science) provides algorithms like community detection to analyze the code structure. The generated reports offer flexibility, ranging from simple query results presented as CSV files to more elaborate Jupyter Notebooks converted to Markdown or PDF formats.
---
## ✨ Features
-- Analyze static Java code structure as a graph
-- Fully automated [pipeline](./.github/workflows/code-structure-analysis.yml) from tool installation to report generation
+- Analyze static code structure as a graph
+- **πNewπ:** Also supports Typescript
+- Fully automated [pipeline for Java](./.github/workflows/java-code-analysis.yml) from tool installation to report generation
+- Fully automated [pipeline for Typescript](./.github/workflows/typescript-code-analysis.yml) from tool installation to report generation
+- Fully automated [local run](./GETTING_STARTED.md)
- More than 130 CSV reports for dependencies, metrics, cycles, annotations, algorithms and many more
- Jupyter notebook reports for dependencies, metrics, visibility and many more
- Graph structure visualization
@@ -78,13 +81,25 @@ These tools are needed to run the graph visualization scripts of directory [grap
- Add this line to your `~/.bashrc` file if you are using Anaconda3: `/c/ProgramData/Anaconda3/etc/profile.d/conda.sh`. Try to find a similar script for other conda package managers or versions.
- Run `conda init` in the git bash opened as administrator. Running it in normal mode usually leads to an error message.
+### Additional Prerequisites for analyzing Typescript
+
+- Please follow the description at https://github.com/jqassistant-plugin/jqassistant-typescript-plugin
+on how to create a json file with the static code information of your Typescript project.
+This can be as simple as running the following command in your Typescript project:
+
+ ```shell
+ npx --yes @jqassistant/ts-lce
+ ```
+
+- Copy the resulting json file (e.g. `.reports/jqa/ts-output.json`) into the "artifacts" directory of your analysis working directory. Custom subdirectories within "artifacts" are also supported.
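+
+  A minimal sketch of that copy step, assuming an analysis working directory named `temp/MyProject-1.0.0` and the default plugin output path (illustrative names that need to be adapted to your setup):
+
+  ```shell
+  # Illustrative paths: adapt the Typescript project directory and the analysis working directory to your setup
+  mkdir -p ./temp/MyProject-1.0.0/artifacts/typescript
+  cp ./my-typescript-project/.reports/jqa/ts-output.json ./temp/MyProject-1.0.0/artifacts/typescript/
+  ```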
+
## π Getting Started
See [GETTING_STARTED.md](./GETTING_STARTED.md) on how to get started on your local machine.
## π Pipeline and Tools
-The [Code Structure Analysis Pipeline](./.github/workflows/code-structure-analysis.yml) utilizes [GitHub Actions](https://docs.github.com/de/actions) to automate the whole analysis process:
+The [Code Structure Analysis Pipeline](./.github/workflows/java-code-analysis.yml) utilizes [GitHub Actions](https://docs.github.com/de/actions) to automate the whole analysis process:
- Use [GitHub Actions](https://docs.github.com/de/actions) Linux Runner
- [Checkout GIT Repository](https://github.com/actions/checkout)
@@ -92,7 +107,7 @@ The [Code Structure Analysis Pipeline](./.github/workflows/code-structure-analys
- [Setup Python with Conda](https://github.com/conda-incubator/setup-miniconda) package manager [Mambaforge](https://github.com/conda-forge/miniforge#mambaforge)
- Download artifacts that contain the code to be analyzed [scripts/artifacts](./scripts/downloader/)
- Setup [Neo4j](https://neo4j.com) Graph Database ([analysis.sh](./scripts/analysis/analyze.sh))
-- Setup [jQAssistant](https://jqassistant.org/get-started) for Java Analysis ([analysis.sh](./scripts/analysis/analyze.sh))
+- Setup [jQAssistant](https://jqassistant.org/get-started) for Java and [Typescript](https://github.com/jqassistant-plugin/jqassistant-typescript-plugin) analysis ([analysis.sh](./scripts/analysis/analyze.sh))
- Start [Neo4j](https://neo4j.com) Graph Database ([analysis.sh](./scripts/analysis/analyze.sh))
- Generate CSV Reports [scripts/reports](./scripts/reports) using the command line JSON parser [jq](https://jqlang.github.io/jq)
- Generate [Jupyter Notebook](https://jupyter.org) reports using these libraries specified in the [environment.yml](./jupyter/environment.yml):
@@ -109,7 +124,7 @@ The [Code Structure Analysis Pipeline](./.github/workflows/code-structure-analys
- [wordcloud](https://github.com/amueller/word_cloud)
- [Graph Visualization](./graph-visualization/README.md) uses [node.js](https://nodejs.org/de) and the dependencies listed in [package.json](./graph-visualization/package.json).
-**Big shout-out** π£ to all the creators and contributors of these great libraries π. Projects like this wouldn't be possible without them. Feel free to [create an issue](https://github.com/JohT/code-graph-analysis-pipeline/issues/new/choose) if i've forgotten something in the list.
+**Big shout-out** π£ to all the creators and contributors of these great libraries π. Projects like this wouldn't be possible without them. Feel free to [create an issue](https://github.com/JohT/code-graph-analysis-pipeline/issues/new/choose) if something is missing or wrong in the list.
## π Command Reference
@@ -143,6 +158,7 @@ The [Code Structure Analysis Pipeline](./.github/workflows/code-structure-analys
## π€ Questions & Answers
- How can i run an analysis locally?
+ π Check the [prerequisites](#π -prerequisites).
π See [Start an analysis](./COMMANDS.md#start-an-analysis) in the [Commands Reference](./COMMANDS.md).
π To get started from scratch see [GETTING_STARTED.md](./GETTING_STARTED.md).
@@ -161,7 +177,7 @@ The [Code Structure Analysis Pipeline](./.github/workflows/code-structure-analys
- How can i add another code basis to be analyzed automatically?
π Create a new artifacts download script in the [scripts/downloader](./scripts/downloader/) directory. Take for example [downloadAxonFramework.sh](./scripts/downloader/downloadAxonFramework.sh) as a reference.
- π Run the script separately before executing [analyze.sh](./scripts/analysis/analyze.sh) also in the [pipeline](./.github/workflows/code-structure-analysis.yml).
+ π Run the script separately before executing [analyze.sh](./scripts/analysis/analyze.sh) also in the [pipeline](./.github/workflows/java-code-analysis.yml).
- How can i trigger a full rescan of all artifacts?
π Delete the file `artifactsChangeDetectionHash.txt` in the `artifacts` directory.
diff --git a/cypher/Artifact_Dependencies/Incoming_Artifact_Dependencies.cypher b/cypher/Artifact_Dependencies/Incoming_Java_Artifact_Dependencies.cypher
similarity index 93%
rename from cypher/Artifact_Dependencies/Incoming_Artifact_Dependencies.cypher
rename to cypher/Artifact_Dependencies/Incoming_Java_Artifact_Dependencies.cypher
index a13e2d33a..5b11d561d 100644
--- a/cypher/Artifact_Dependencies/Incoming_Artifact_Dependencies.cypher
+++ b/cypher/Artifact_Dependencies/Incoming_Java_Artifact_Dependencies.cypher
@@ -1,6 +1,6 @@
// Incoming Artifact Dependencies
- MATCH (a:Artifact:Archive)
+ MATCH (a:Java:Artifact:Archive)
OPTIONAL MATCH (a)<-[r:DEPENDS_ON]-(ea:Artifact:Archive)
WHERE a.fileName <> ea.fileName
WITH a
diff --git a/cypher/Artifact_Dependencies/Outgoing_Artifact_Dependencies.cypher b/cypher/Artifact_Dependencies/Outgoing_Java_Artifact_Dependencies.cypher
similarity index 92%
rename from cypher/Artifact_Dependencies/Outgoing_Artifact_Dependencies.cypher
rename to cypher/Artifact_Dependencies/Outgoing_Java_Artifact_Dependencies.cypher
index f0a46e979..700c46ebe 100644
--- a/cypher/Artifact_Dependencies/Outgoing_Artifact_Dependencies.cypher
+++ b/cypher/Artifact_Dependencies/Outgoing_Java_Artifact_Dependencies.cypher
@@ -1,6 +1,6 @@
// Outgoing Artifact Dependencies
- MATCH (a:Artifact:Archive)
+ MATCH (a:Java:Artifact:Archive)
OPTIONAL MATCH (a)-[r:DEPENDS_ON]->(ea:Artifact:Archive)
WHERE a.fileName <> ea.fileName
WITH a
diff --git a/cypher/Artifact_Dependencies/Set_number_of_packages_and_types_on_artifacts.cypher b/cypher/Artifact_Dependencies/Set_number_of_Java_packages_and_types_on_artifacts.cypher
similarity index 74%
rename from cypher/Artifact_Dependencies/Set_number_of_packages_and_types_on_artifacts.cypher
rename to cypher/Artifact_Dependencies/Set_number_of_Java_packages_and_types_on_artifacts.cypher
index 491a27894..f651eb1da 100644
--- a/cypher/Artifact_Dependencies/Set_number_of_packages_and_types_on_artifacts.cypher
+++ b/cypher/Artifact_Dependencies/Set_number_of_Java_packages_and_types_on_artifacts.cypher
@@ -1,6 +1,6 @@
-// Set number of packages and types on artifacts
+// Set number of Java packages and types on artifacts
- MATCH (artifact:Artifact)-[:CONTAINS]->(package:Package)
+ MATCH (artifact:Java:Artifact)-[:CONTAINS]->(package:Java:Package)
MATCH (package)-[:CONTAINS]->(type:Type)
WITH artifact
,COUNT(DISTINCT package.fqn) AS numberOfPackages
diff --git a/cypher/Create_index_for_full_qualified_type_name.cypher b/cypher/Create_Java_Type_index_for_full_qualified_name.cypher
similarity index 51%
rename from cypher/Create_index_for_full_qualified_type_name.cypher
rename to cypher/Create_Java_Type_index_for_full_qualified_name.cypher
index c7e12aa3e..387f20e87 100644
--- a/cypher/Create_index_for_full_qualified_type_name.cypher
+++ b/cypher/Create_Java_Type_index_for_full_qualified_name.cypher
@@ -1,3 +1,3 @@
// Create index for the full qualified type name
-CREATE INDEX INDEX_FULL_QUALIFIED_TYPE_NAME IF NOT EXISTS FOR (t:Type) ON (t.fqn)
\ No newline at end of file
+CREATE INDEX INDEX_JAVA_FULL_QUALIFIED_NAME IF NOT EXISTS FOR (t:Type) ON (t.fqn)
\ No newline at end of file
diff --git a/cypher/Create_Typescript_index_for_full_qualified_name.cypher b/cypher/Create_Typescript_index_for_full_qualified_name.cypher
new file mode 100644
index 000000000..1c7a8b22b
--- /dev/null
+++ b/cypher/Create_Typescript_index_for_full_qualified_name.cypher
@@ -0,0 +1,3 @@
+// Create index for the full qualified name (globalFqn) of Typescript elements
+
+CREATE INDEX INDEX_TYPESCRIPT_FULL_QUALIFIED_NAME IF NOT EXISTS FOR (t:TS) ON (t.globalFqn)
\ No newline at end of file
diff --git a/cypher/Dependencies_Projection/Dependencies_0_Check_Projectable.cypher b/cypher/Dependencies_Projection/Dependencies_0_Check_Projectable.cypher
new file mode 100644
index 000000000..307eefa23
--- /dev/null
+++ b/cypher/Dependencies_Projection/Dependencies_0_Check_Projectable.cypher
@@ -0,0 +1,8 @@
+// Check if there is at least one projectable dependency. Variables: dependencies_projection_node, dependencies_projection_weight_property
+
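+// Usage sketch: both variables are expected as query parameters.
+// For a manual run, e.g. in Neo4j Browser, they could be set with example values like:
+//   :param dependencies_projection_node => 'Package'
+//   :param dependencies_projection_weight_property => 'weight25PercentInterfaces'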
+ MATCH (source)-[dependency:DEPENDS_ON]->(target)
+ WHERE $dependencies_projection_node IN labels(source)
+ AND $dependencies_projection_node IN labels(target)
+ AND $dependencies_projection_weight_property IN keys(dependency)
+RETURN elementId(source) AS sourceElementId
+ LIMIT 1
\ No newline at end of file
diff --git a/cypher/Dependencies_Projection/Dependencies_3c_Create_Type_Projection.cypher b/cypher/Dependencies_Projection/Dependencies_3c_Create_Java_Type_Projection.cypher
similarity index 72%
rename from cypher/Dependencies_Projection/Dependencies_3c_Create_Type_Projection.cypher
rename to cypher/Dependencies_Projection/Dependencies_3c_Create_Java_Type_Projection.cypher
index 849bcb7fb..af7ec3032 100644
--- a/cypher/Dependencies_Projection/Dependencies_3c_Create_Type_Projection.cypher
+++ b/cypher/Dependencies_Projection/Dependencies_3c_Create_Java_Type_Projection.cypher
@@ -1,6 +1,6 @@
-// Create filtered Type node projection without zero-degree nodes, external types, java types or duplicates. Variables: dependencies_projection. Requires 'Label_base_java_types', 'Label_buildin_java_types' and 'Label_resolved_duplicate_types' of 'Types' directory.
+// Create filtered Java Type node projection without zero-degree nodes, external types, java types or duplicates. Variables: dependencies_projection. Requires 'Label_base_java_types', 'Label_buildin_java_types' and 'Label_resolved_duplicate_types' of 'Types' directory.
- MATCH (internalType:Type&!PrimitiveType&!Void&!JavaType&!ResolvedDuplicateType&!ExternalType)
+ MATCH (internalType:Java&Type&!PrimitiveType&!Void&!JavaType&!ResolvedDuplicateType&!ExternalType)
OPTIONAL MATCH (internalType)-[typeDependency:DEPENDS_ON]->(dependentType:Type&!PrimitiveType&!Void&!JavaType&!ResolvedDuplicateType&!ExternalType)
WITH internalType
,typeDependency
diff --git a/cypher/Method_Projection/Methods_2_Create_Projection.cypher b/cypher/Dependencies_Projection/Dependencies_3d_Create_Java_Method_Projection.cypher
similarity index 65%
rename from cypher/Method_Projection/Methods_2_Create_Projection.cypher
rename to cypher/Dependencies_Projection/Dependencies_3d_Create_Java_Method_Projection.cypher
index 0c3bfcbd5..4cc3fd5f6 100644
--- a/cypher/Method_Projection/Methods_2_Create_Projection.cypher
+++ b/cypher/Dependencies_Projection/Dependencies_3d_Create_Java_Method_Projection.cypher
@@ -1,6 +1,6 @@
-// Create directed projection for methods. Variables: dependencies_projection, dependencies_projection_weight_property
+// Create directed projection for public Java methods filtering out constructors, getters and setters. Variables: dependencies_projection, dependencies_projection_weight_property
- MATCH (source:Method)-[r:INVOKES]->(target:Method)
+ MATCH (source:Java:Method)-[r:INVOKES]->(target:Java:Method)
WHERE source.effectiveLineCount > 1
AND target.effectiveLineCount > 1
AND source.visibility = 'public'
diff --git a/cypher/DependsOn_Relationship_Weights/Add_fine_grained_weights_for_Typescript_module_dependencies.cypher b/cypher/DependsOn_Relationship_Weights/Add_fine_grained_weights_for_Typescript_module_dependencies.cypher
new file mode 100644
index 000000000..583085f6f
--- /dev/null
+++ b/cypher/DependsOn_Relationship_Weights/Add_fine_grained_weights_for_Typescript_module_dependencies.cypher
@@ -0,0 +1,68 @@
+// Add fine grained weight properties for dependencies between Typescript modules
+
+// Get the top level dependency between a Typescript module and the external modules it uses
+ MATCH (source:TS:Module)-[moduleDependency:DEPENDS_ON]->(target:ExternalModule)
+ WHERE NOT EXISTS {(target)-[:RESOLVES_TO]->(source)}
+OPTIONAL MATCH (source)-[resolvedModuleDependency:DEPENDS_ON]->(resolvedTarget:TS:Module)<-[:RESOLVES_TO]-(target)
+ WITH source
+ ,target
+ ,moduleDependency
+ ,resolvedModuleDependency
+ ,moduleDependency.cardinality AS externalModuleCardinality
+// Get optional external (e.g. type) declarations that the external module (target) provides and the source module uses
+OPTIONAL MATCH (source)-[rd:DEPENDS_ON]->(declaration:ExternalDeclaration)<-[:EXPORTS]-(target)
+ WITH source
+ ,target
+ ,moduleDependency
+ ,resolvedModuleDependency
+ ,externalModuleCardinality
+ ,count(DISTINCT declaration.globalFqn) AS declarationCount
+ ,sum(rd.cardinality) AS declarationCardinality
+ ,collect(DISTINCT declaration.globalFqn)[0..4] AS declarationExamples
+ ,collect(declaration) AS declarations
+// Get optional low coupling elements (TypeAlias, Interface) that the source module contains and defines (low level) that depend on the external module (target)
+UNWIND declarations AS declaration
+OPTIONAL MATCH (source)-[ra:DEPENDS_ON]->(declaration)-[:RESOLVES_TO]->(abstractType:TypeAlias|Interface)
+ WITH source
+ ,target
+ ,moduleDependency
+ ,resolvedModuleDependency
+ ,externalModuleCardinality
+ ,declarationCount
+ ,declarationCardinality
+ ,declarationExamples
+ ,count(DISTINCT abstractType.globalFqn) AS abstractTypeCount
+ ,sum(ra.cardinality) AS abstractTypeCardinality
+ ,collect(DISTINCT abstractType.globalFqn)[0..4] AS abstractTypeExamples
+// Set additional fine grained relationship properties (weights) to distinguish low and high coupling elements.
+// The "cardinality" property is similar to "weight" property for Java dependencies and comes from the jQAssistant Typescript Plugin.
+// - "abstractTypeCardinality" is the sum of all TypeAlias and Interface cardinality properties (if available)
+// and corresponds to the "weightInterfaces" relationship property for Java.
+// - "declarationCardinality" is the sum of all cardinality properties (including available Interface and TypeAlias)
+//   and is the same as the already existing "cardinality" of the moduleDependency. That's why it's left out.
+// - "lowCouplingElement25PercentWeight" subtracts 75% of the weights for abstract types like Interfaces and Type aliases
+// to compensate for their low coupling influence. Not included "high coupling" elements like Functions and Classes
+//   remain in the weight as they are. The same applies to "lowCouplingElement10PercentWeight", just in a stronger manner (90% instead of 75%).
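+// Worked example with illustrative numbers: for cardinality = 20 and abstractTypeCardinality = 8,
+// lowCouplingElement25PercentWeight = 20 - round(8 * 0.75) = 14 and
+// lowCouplingElement10PercentWeight = 20 - round(8 * 0.90) = 13.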
+ SET moduleDependency.declarationCount = declarationCount
+ ,moduleDependency.abstractTypeCount = abstractTypeCount
+ ,moduleDependency.abstractTypeCardinality = abstractTypeCardinality
+ ,moduleDependency.lowCouplingElement25PercentWeight = toInteger(moduleDependency.cardinality - round(abstractTypeCardinality * 0.75))
+ ,moduleDependency.lowCouplingElement10PercentWeight = toInteger(moduleDependency.cardinality - round(abstractTypeCardinality * 0.90))
+ // Set all new properties also to a resolved (direct) dependency relationship if it exists.
+ ,resolvedModuleDependency.declarationCount = declarationCount
+ ,resolvedModuleDependency.abstractTypeCount = abstractTypeCount
+ ,resolvedModuleDependency.abstractTypeCardinality = abstractTypeCardinality
+ ,resolvedModuleDependency.lowCouplingElement25PercentWeight = toInteger(resolvedModuleDependency.cardinality - round(abstractTypeCardinality * 0.75))
+ ,resolvedModuleDependency.lowCouplingElement10PercentWeight = toInteger(resolvedModuleDependency.cardinality - round(abstractTypeCardinality * 0.90))
+RETURN source.globalFqn AS sourceName
+ ,target.globalFqn AS targetName
+ ,declarationCount
+ ,abstractTypeCount
+ ,externalModuleCardinality
+ ,declarationCardinality
+ ,abstractTypeCardinality
+ ,moduleDependency.lowCouplingElement25PercentWeight
+ ,moduleDependency.lowCouplingElement10PercentWeight
+ ,declarationExamples
+ ,abstractTypeExamples
+ORDER BY sourceName ASC
\ No newline at end of file
diff --git a/cypher/Package_Relationship_Weights/Add_weight10PercentInterfaces_to_Package_DEPENDS_ON_relationships.cypher b/cypher/DependsOn_Relationship_Weights/Add_weight10PercentInterfaces_to_Java_Package_DEPENDS_ON_relationships.cypher
similarity index 83%
rename from cypher/Package_Relationship_Weights/Add_weight10PercentInterfaces_to_Package_DEPENDS_ON_relationships.cypher
rename to cypher/DependsOn_Relationship_Weights/Add_weight10PercentInterfaces_to_Java_Package_DEPENDS_ON_relationships.cypher
index 214f59285..13273f358 100644
--- a/cypher/Package_Relationship_Weights/Add_weight10PercentInterfaces_to_Package_DEPENDS_ON_relationships.cypher
+++ b/cypher/DependsOn_Relationship_Weights/Add_weight10PercentInterfaces_to_Java_Package_DEPENDS_ON_relationships.cypher
@@ -1,6 +1,6 @@
// Add weight10PercentInterfaces to Package DEPENDS_ON relationships
- MATCH (package:Package)-[r:DEPENDS_ON]->(dependent:Package)
+ MATCH (package:Java:Package)-[r:DEPENDS_ON]->(dependent:Java:Package)
WITH package, r
,toInteger(r.weight - round(r.weightInterfaces * 0.90)) AS weight10PercentInterfaces
SET r.weight10PercentInterfaces = weight10PercentInterfaces
diff --git a/cypher/Package_Relationship_Weights/Add_weight25PercentInterfaces_to_Package_DEPENDS_ON_relationships.cypher b/cypher/DependsOn_Relationship_Weights/Add_weight25PercentInterfaces_to_Java_Package_DEPENDS_ON_relationships.cypher
similarity index 83%
rename from cypher/Package_Relationship_Weights/Add_weight25PercentInterfaces_to_Package_DEPENDS_ON_relationships.cypher
rename to cypher/DependsOn_Relationship_Weights/Add_weight25PercentInterfaces_to_Java_Package_DEPENDS_ON_relationships.cypher
index 5922d8ff3..4ef84a68a 100644
--- a/cypher/Package_Relationship_Weights/Add_weight25PercentInterfaces_to_Package_DEPENDS_ON_relationships.cypher
+++ b/cypher/DependsOn_Relationship_Weights/Add_weight25PercentInterfaces_to_Java_Package_DEPENDS_ON_relationships.cypher
@@ -1,6 +1,6 @@
// Add weight25PercentInterfaces to Package DEPENDS_ON relationships
- MATCH (package:Package)-[r:DEPENDS_ON]->(dependent:Package)
+ MATCH (package:Java:Package)-[r:DEPENDS_ON]->(dependent:Java:Package)
WITH package, r
,toInteger(r.weight - round(r.weightInterfaces * 0.75)) AS weight25PercentInterfaces
SET r.weight25PercentInterfaces = weight25PercentInterfaces
diff --git a/cypher/Package_Relationship_Weights/Add_weight_property_for_Interface_Dependencies_to_Package_DEPENDS_ON_Relationship.cypher b/cypher/DependsOn_Relationship_Weights/Add_weight_property_for_Java_Interface_Dependencies_to_Package_DEPENDS_ON_Relationship.cypher
similarity index 88%
rename from cypher/Package_Relationship_Weights/Add_weight_property_for_Interface_Dependencies_to_Package_DEPENDS_ON_Relationship.cypher
rename to cypher/DependsOn_Relationship_Weights/Add_weight_property_for_Java_Interface_Dependencies_to_Package_DEPENDS_ON_Relationship.cypher
index 6a2a1ff58..ae16480ec 100644
--- a/cypher/Package_Relationship_Weights/Add_weight_property_for_Interface_Dependencies_to_Package_DEPENDS_ON_Relationship.cypher
+++ b/cypher/DependsOn_Relationship_Weights/Add_weight_property_for_Java_Interface_Dependencies_to_Package_DEPENDS_ON_Relationship.cypher
@@ -1,6 +1,6 @@
// Add weight property for Interface Dependencies to Package DEPENDS_ON Relationship
- MATCH (sourcePackage:Package)-[packageDependency:DEPENDS_ON]->(dependentPackage:Package)
+ MATCH (sourcePackage:Java:Package)-[packageDependency:DEPENDS_ON]->(dependentPackage:Java:Package)
MATCH (sourcePackage)-[:CONTAINS]->(sourceType:Type)
OPTIONAL MATCH (sourceType:Type)-[typeDependency:DEPENDS_ON]->(dependentInterface:Interface)<-[:CONTAINS]-(dependentPackage)
WHERE sourcePackage.fqn <> dependentPackage.fqn
diff --git a/cypher/Package_Relationship_Weights/Add_weight_property_to_Package_DEPENDS_ON_Relationship.cypher b/cypher/DependsOn_Relationship_Weights/Add_weight_property_to_Java_Package_DEPENDS_ON_Relationship.cypher
similarity index 86%
rename from cypher/Package_Relationship_Weights/Add_weight_property_to_Package_DEPENDS_ON_Relationship.cypher
rename to cypher/DependsOn_Relationship_Weights/Add_weight_property_to_Java_Package_DEPENDS_ON_Relationship.cypher
index 95adeba73..deca54f28 100644
--- a/cypher/Package_Relationship_Weights/Add_weight_property_to_Package_DEPENDS_ON_Relationship.cypher
+++ b/cypher/DependsOn_Relationship_Weights/Add_weight_property_to_Java_Package_DEPENDS_ON_Relationship.cypher
@@ -1,6 +1,6 @@
// Add weight property to Package DEPENDS_ON Relationship
- MATCH (sourcePackage:Package)-[:CONTAINS]->(sourceType:Type)-[typeDependency:DEPENDS_ON]->(dependentType:Type)<-[:CONTAINS]-(dependentPackage:Package)
+ MATCH (sourcePackage:Java:Package)-[:CONTAINS]->(sourceType:Type)-[typeDependency:DEPENDS_ON]->(dependentType:Type)<-[:CONTAINS]-(dependentPackage:Java:Package)
MATCH (sourcePackage)-[packageDependency:DEPENDS_ON]->(dependentPackage)
WHERE sourcePackage.fqn <> dependentPackage.fqn
WITH packageDependency
diff --git a/cypher/Exploration/Explore_DEPENDS_ON_relationships.cypher b/cypher/Exploration/Explore_DEPENDS_ON_relationships.cypher
new file mode 100644
index 000000000..d566b18e9
--- /dev/null
+++ b/cypher/Exploration/Explore_DEPENDS_ON_relationships.cypher
@@ -0,0 +1,8 @@
+// Explore DEPENDS_ON relationships schema
+
+MATCH (s)-[r:DEPENDS_ON]->(t)
+RETURN labels(s) AS sourceLabels
+ ,labels(t) AS targetLabels
+ ,keys(r) AS relationshipKeys
+ ,count(*) AS numberOfNodes
+ORDER BY sourceLabels, targetLabels, relationshipKeys
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_ExternalDeclaration.cypher b/cypher/Exploration/Explore_ExternalDeclaration.cypher
new file mode 100644
index 000000000..a6d318282
--- /dev/null
+++ b/cypher/Exploration/Explore_ExternalDeclaration.cypher
@@ -0,0 +1,12 @@
+// External declarations split by their module and their contained symbols
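+// The globalFqn of an external declaration is assumed to look like '"<module path>".<symbol name>',
+// so splitting at '".' separates the module part from the symbol part.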
+
+MATCH (s:ExternalDeclaration)
+ WITH *
+ ,replace(split(s.globalFqn, '".')[0],'"', '') AS externalDeclarationModule
+ ,split(s.globalFqn, '".')[1] AS externalDeclarationSymbol
+RETURN externalDeclarationModule
+ ,count(DISTINCT externalDeclarationSymbol) AS externalDeclarationSymbols
+ ,collect(externalDeclarationSymbol)[0..4] AS someExternalDeclarationSymbols
+ ,count(*) as numberOfExternalDeclarations
+ORDER BY numberOfExternalDeclarations DESC, externalDeclarationModule ASC
+LIMIT 50
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_Module_outgoing_dependencies.cypher b/cypher/Exploration/Explore_Module_outgoing_dependencies.cypher
new file mode 100644
index 000000000..59c0f96e1
--- /dev/null
+++ b/cypher/Exploration/Explore_Module_outgoing_dependencies.cypher
@@ -0,0 +1,17 @@
+// Explore outgoing dependencies of modules
+
+MATCH (source:Module)-[rm:DEPENDS_ON]->(module:ExternalModule)
+OPTIONAL MATCH (module)-[:EXPORTS]->(declaration:ExternalDeclaration)<-[re:DEPENDS_ON]-(source)
+OPTIONAL MATCH (source)-[:DECLARES]->(implementation:Function|Variable)-[ri:DEPENDS_ON]->(module)
+OPTIONAL MATCH (source)-[:DECLARES]->(abstract:Interface|TypeAlias)-[ra:DEPENDS_ON]->(module)
+RETURN source.globalFqn AS module
+ ,count(DISTINCT abstract) AS numberOfAbstract
+ ,count(DISTINCT declaration) AS numberOfExternalDeclarations
+ ,count(DISTINCT implementation) AS numberOfImplementations
+ ,sum(rm.cardinality) AS sumModuleDependencyWeights
+ ,sum(re.cardinality) AS sumExternalDeclarationWeights
+ ,sum(ri.cardinality) AS sumImplementationWeights
+ ,sum(ra.cardinality) AS sumAbstractWeights
+ //TODO sumWeights should sum up to sumModuleDependencyWeights
+ ,sum(re.cardinality) + sum(ri.cardinality) + sum(ra.cardinality) as sumWeights
+ORDER BY source.globalFqn ASC
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_Typescript_elements_with_same_globalFqn.cypher b/cypher/Exploration/Explore_Typescript_elements_with_same_globalFqn.cypher
new file mode 100644
index 000000000..ca98aed19
--- /dev/null
+++ b/cypher/Exploration/Explore_Typescript_elements_with_same_globalFqn.cypher
@@ -0,0 +1,11 @@
+// Explore Typescript elements with same globalFqn
+
+ MATCH (ts1:TS)
+ WHERE ts1.globalFqn CONTAINS '"'
+ MATCH (ts2:TS)
+ WHERE ts2.globalFqn CONTAINS '"'
+ AND (ts2.globalFqn = ts1.globalFqn
+ OR toLower(ts2.globalFqn) = toLower(ts1.globalFqn))
+ AND ts1 <> ts2
+RETURN labels(ts1), labels(ts2), count(*)
+LIMIT 30
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_Typescript_modules_overview.cypher b/cypher/Exploration/Explore_Typescript_modules_overview.cypher
new file mode 100644
index 000000000..b914a029c
--- /dev/null
+++ b/cypher/Exploration/Explore_Typescript_modules_overview.cypher
@@ -0,0 +1,15 @@
+// Explore nodes grouped by their module (first part of globalFqn)
+
+ MATCH (n:TS)
+ WHERE n.globalFqn IS NOT NULL
+UNWIND labels(n) AS nodeLabel
+ WITH replace(split(n.globalFqn, '".')[0],'"', '') AS module
+ ,collect(split(n.globalFqn, '".')[1]) AS symbols
+ ,nodeLabel
+ ,count(DISTINCT n) as numberOfNodes
+ WHERE nodeLabel <> 'TS'
+RETURN module
+ ,collect(DISTINCT nodeLabel) AS nodeLabels
+ ,sum(numberOfNodes) AS numberOfNodes
+ ,collect(symbols[0..4]) AS symbolExamples
+ORDER BY module ASC
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_Typescript_modules_per_symboltype.cypher b/cypher/Exploration/Explore_Typescript_modules_per_symboltype.cypher
new file mode 100644
index 000000000..6e43488b0
--- /dev/null
+++ b/cypher/Exploration/Explore_Typescript_modules_per_symboltype.cypher
@@ -0,0 +1,15 @@
+// Explore nodes grouped by their module (first part of globalFqn) and their type of contained symbols
+
+ MATCH (n:TS)
+ WHERE n.globalFqn IS NOT NULL
+UNWIND labels(n) AS nodeLabel
+ WITH replace(split(n.globalFqn, '".')[0],'"', '') AS module
+ ,collect(split(n.globalFqn, '".')[1]) AS symbols
+ ,nodeLabel
+ ,count(DISTINCT n) as numberOfNodes
+ WHERE nodeLabel <> 'TS'
+RETURN module
+ ,collect(DISTINCT nodeLabel) as nodeLabels
+ ,numberOfNodes
+ ,symbols[0..4] AS symbolExamples
+ORDER BY module ASC
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_Typescript_projects.cypher b/cypher/Exploration/Explore_Typescript_projects.cypher
new file mode 100644
index 000000000..317aab46f
--- /dev/null
+++ b/cypher/Exploration/Explore_Typescript_projects.cypher
@@ -0,0 +1,5 @@
+// Explore Typescript Projects
+
+MATCH (project:TS:Project)-[:HAS_ROOT]->(dir:Directory)-[:CONTAINS]->(module:Module)
+OPTIONAL MATCH (project)<-[:REFERENCED_PROJECTS*]-(top:TS:Project)
+RETURN project, dir, module, top
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_degree_schema.cypher b/cypher/Exploration/Explore_degree_schema.cypher
new file mode 100644
index 000000000..5d71324ae
--- /dev/null
+++ b/cypher/Exploration/Explore_degree_schema.cypher
@@ -0,0 +1,14 @@
+// Explore incoming/outgoing relationship (degree) schema
+
+MATCH (source)-[relation]-(target)
+OPTIONAL MATCH (source)<-[incoming]-(target)
+OPTIONAL MATCH (source)-[outgoing]->(target)
+RETURN labels(source) AS sourceType
+ ,type(incoming) AS incomingRelationship
+ ,type(outgoing) AS outgoingRelationship
+ ,labels(target) AS dependentType
+ ,count(distinct source) AS sourceCount
+ ,count(distinct target) AS dependentCount
+ ,collect(distinct coalesce(source.globalFqn, source.name, source.referencedGlobalFqn))[0..9] AS sourceNameExamples
+ ,collect(distinct coalesce(target.globalFqn, target.name, target.referencedGlobalFqn))[0..9] AS dependentNameExamples
+ORDER BY sourceType, dependentType
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_node_properties.cypher b/cypher/Exploration/Explore_node_properties.cypher
new file mode 100644
index 000000000..91fb0be89
--- /dev/null
+++ b/cypher/Exploration/Explore_node_properties.cypher
@@ -0,0 +1,11 @@
+// Explore node properties, the labels of their nodes and their count
+
+MATCH (n)
+UNWIND keys(n) AS nodePropertyName
+UNWIND labels(n) AS nodeLabel
+RETURN nodePropertyName
+ ,collect(DISTINCT nodeLabel) AS nodeLabels
+ ,count(DISTINCT n) AS numberOfNodes
+ ,count(DISTINCT n[nodePropertyName]) AS numberOfDistinctValues
+ ,collect(DISTINCT n[nodePropertyName])[0..4] AS exampleValues
+ORDER BY nodePropertyName
\ No newline at end of file
diff --git a/cypher/Exploration/Explore_node_relationships.cypher b/cypher/Exploration/Explore_node_relationships.cypher
new file mode 100644
index 000000000..187ef8b13
--- /dev/null
+++ b/cypher/Exploration/Explore_node_relationships.cypher
@@ -0,0 +1,11 @@
+// Get all relationships of one specific node to explore the schema
+
+MATCH (s:TS:Module)-[r]->(t)
+RETURN labels(s) AS sourceLabels
+ ,keys(s) AS sourceKeys
+ ,labels(t) AS targetLabels
+ ,keys(t) AS targetKeys
+ ,type(r) AS relationshipType
+ ,keys(r) AS relationshipKeys
+ ,count(*) AS numberOfNodes
+ORDER BY sourceLabels, targetLabels, relationshipKeys
\ No newline at end of file
diff --git a/cypher/External_Dependencies/List_external_Java_types_used.cypher b/cypher/External_Dependencies/List_external_Java_types_used.cypher
new file mode 100644
index 000000000..52168e9f6
--- /dev/null
+++ b/cypher/External_Dependencies/List_external_Java_types_used.cypher
@@ -0,0 +1,3 @@
+// List external Java types used
+
+MATCH (external:Java:ExternalType) RETURN external.fqn
\ No newline at end of file
diff --git a/cypher/External_Dependencies/List_external_types_used.cypher b/cypher/External_Dependencies/List_external_types_used.cypher
deleted file mode 100644
index 4b9dfac3a..000000000
--- a/cypher/External_Dependencies/List_external_types_used.cypher
+++ /dev/null
@@ -1,3 +0,0 @@
-// List external types used
-
-MATCH (external:ExternalType) RETURN external.fqn
\ No newline at end of file
diff --git a/cypher/Get_all_declared_and_inherited_methods_of_a_type.cypher b/cypher/Java/Get_all_declared_and_inherited_methods_of_a_type.cypher
similarity index 100%
rename from cypher/Get_all_declared_and_inherited_methods_of_a_type.cypher
rename to cypher/Java/Get_all_declared_and_inherited_methods_of_a_type.cypher
diff --git a/cypher/Method_Projection/Methods_1_Delete_Projection.cypher b/cypher/Method_Projection/Methods_1_Delete_Projection.cypher
deleted file mode 100644
index 16cb66e1b..000000000
--- a/cypher/Method_Projection/Methods_1_Delete_Projection.cypher
+++ /dev/null
@@ -1,5 +0,0 @@
-// Delete projection if existing. Variables: dependencies_projection
-
- CALL gds.graph.drop($dependencies_projection + '-cleaned', false)
- YIELD graphName, nodeCount, relationshipCount, creationTime, modificationTime
-RETURN graphName, nodeCount, relationshipCount, creationTime, modificationTime
\ No newline at end of file
diff --git a/cypher/Metrics/Calculate_and_set_Abstractness.cypher b/cypher/Metrics/Calculate_and_set_Abstractness.cypher
deleted file mode 100644
index 5eaafbbd4..000000000
--- a/cypher/Metrics/Calculate_and_set_Abstractness.cypher
+++ /dev/null
@@ -1,6 +0,0 @@
-//Calculate and set Abstractness
- MATCH (p:Package)
- WITH p
- ,toFloat(p.numberAbstractTypes) / (p.numberTypes + 1E-38) AS abstractness
- SET p.abstractness = abstractness
-RETURN p.fqn AS packageName, p.numberAbstractTypes, p.numberTypes, abstractness
\ No newline at end of file
diff --git a/cypher/Metrics/Calculate_and_set_Abstractness_including_Counts.cypher b/cypher/Metrics/Calculate_and_set_Abstractness_for_Java.cypher
similarity index 100%
rename from cypher/Metrics/Calculate_and_set_Abstractness_including_Counts.cypher
rename to cypher/Metrics/Calculate_and_set_Abstractness_for_Java.cypher
diff --git a/cypher/Metrics/Calculate_and_set_Abstractness_including_Subpackages.cypher b/cypher/Metrics/Calculate_and_set_Abstractness_for_Java_including_Subpackages.cypher
similarity index 100%
rename from cypher/Metrics/Calculate_and_set_Abstractness_including_Subpackages.cypher
rename to cypher/Metrics/Calculate_and_set_Abstractness_for_Java_including_Subpackages.cypher
diff --git a/cypher/Metrics/Calculate_and_set_Abstractness_for_Typescript.cypher b/cypher/Metrics/Calculate_and_set_Abstractness_for_Typescript.cypher
new file mode 100644
index 000000000..199e2cf84
--- /dev/null
+++ b/cypher/Metrics/Calculate_and_set_Abstractness_for_Typescript.cypher
@@ -0,0 +1,25 @@
+//Calculate and set Abstractness for Typescript Modules
+
+MATCH (module:TS:Module)
+// Get the project of the external module if available
+OPTIONAL MATCH (projectdir:Directory)<-[:HAS_ROOT]-(project:TS:Project)-[:CONTAINS]->(module)
+ WITH reverse(split(reverse(projectdir.absoluteFileName), '/')[0]) AS projectName
+ ,module
+ ,count{(module)-[:EXPORTS]->(:TS)} AS numberTypes
+ ,count{(module)-[:EXPORTS]->(:Class{abstract:true})} AS numberAbstractClasses
+ ,count{(module)-[:EXPORTS]->(:TypeAlias)} AS numberTypeAliases
+ ,count{(module)-[:EXPORTS]->(:Interface)} AS numberInterfaces
+ WITH *
+ ,numberInterfaces + numberTypeAliases + numberAbstractClasses AS numberAbstractTypes
+ WITH *
+ ,toFloat(numberAbstractTypes) / (numberTypes + 1E-38) AS abstractness
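+ // Example: 3 abstract types out of 12 exported types result in abstractness = 0.25.
+ // The tiny epsilon (1E-38) only avoids a division by zero for modules without exports.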
+ SET module.abstractness = abstractness
+ ,module.numberOfAbstractTypes = numberAbstractTypes
+ ,module.numberOfTypes = numberTypes
+RETURN projectName
+ ,module.globalFqn AS fullQualifiedModuleName
+ ,module.name AS moduleName
+ ,abstractness
+ ,numberAbstractTypes
+ ,numberTypes
+ORDER BY abstractness ASC, numberTypes DESC
\ No newline at end of file
diff --git a/cypher/Metrics/Calculate_and_set_Instability_outgoing_incoming_Dependencies.cypher b/cypher/Metrics/Calculate_and_set_Instability_for_Java.cypher
similarity index 81%
rename from cypher/Metrics/Calculate_and_set_Instability_outgoing_incoming_Dependencies.cypher
rename to cypher/Metrics/Calculate_and_set_Instability_for_Java.cypher
index 7330bc7e9..17b549a51 100644
--- a/cypher/Metrics/Calculate_and_set_Instability_outgoing_incoming_Dependencies.cypher
+++ b/cypher/Metrics/Calculate_and_set_Instability_for_Java.cypher
@@ -1,6 +1,9 @@
-// Calculate and set Instability = outgoing / (outgoing + incoming) Dependencies
+// Calculate and set Instability for Java
+// Instability = outgoing / (outgoing + incoming) Dependencies
- MATCH (p:Package)
+ MATCH (p:Java:Package)
+WHERE p.incomingDependencies > 0
+ AND p.outgoingDependencies > 0
WITH p
,toFloat(p.outgoingDependencies) / (p.outgoingDependencies + p.incomingDependencies + 1E-38) as instability
,toFloat(p.outgoingDependentTypes) / (p.outgoingDependentTypes + p.incomingDependentTypes + 1E-38) as instabilityTypes
@@ -18,9 +21,10 @@
,instabilityInterfaces
,instabilityPackages
,instabilityArtifacts
-WHERE p.incomingDependencies > 0
- AND p.outgoingDependencies > 0
-RETURN DISTINCT p.fqn, p.name
+MATCH (artifact:Artifact)-[:CONTAINS]->(p)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
,instability
,instabilityTypes
,instabilityInterfaces
diff --git a/cypher/Metrics/Calculate_and_set_Instability_Including_Subpackages.cypher b/cypher/Metrics/Calculate_and_set_Instability_for_Java_Including_Subpackages.cypher
similarity index 90%
rename from cypher/Metrics/Calculate_and_set_Instability_Including_Subpackages.cypher
rename to cypher/Metrics/Calculate_and_set_Instability_for_Java_Including_Subpackages.cypher
index fc3b9b4d4..20d388475 100644
--- a/cypher/Metrics/Calculate_and_set_Instability_Including_Subpackages.cypher
+++ b/cypher/Metrics/Calculate_and_set_Instability_for_Java_Including_Subpackages.cypher
@@ -1,6 +1,6 @@
// Calculate and set Instability = outgoing / (outgoing + incoming) Dependencies
- MATCH (p:Package)
+ MATCH (p:Java:Package)
WHERE p.incomingDependenciesIncludingSubpackages > 0
AND p.outgoingDependenciesIncludingSubpackages > 0
WITH p
@@ -20,7 +20,10 @@
,instabilityInterfaces
,instabilityPackages
,instabilityArtifacts
-RETURN DISTINCT p.fqn, p.name
+ MATCH (artifact:Artifact)-[:CONTAINS]->(p)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
,instability
,instabilityTypes
,instabilityInterfaces
diff --git a/cypher/Metrics/Calculate_and_set_Instability_for_Typescript.cypher b/cypher/Metrics/Calculate_and_set_Instability_for_Typescript.cypher
new file mode 100644
index 000000000..5c0e434a1
--- /dev/null
+++ b/cypher/Metrics/Calculate_and_set_Instability_for_Typescript.cypher
@@ -0,0 +1,35 @@
+// Calculate and set Instability for Typescript Modules: Instability = outgoing / (outgoing + incoming) Dependencies
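+// Example: a module with 8 outgoing and 2 incoming dependencies gets instability = 8 / (8 + 2) = 0.8,
+// which marks it as rather unstable: it depends on many others while few depend on it.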
+
+ MATCH (module:TS:Module)
+OPTIONAL MATCH (projectdir:Directory)<-[:HAS_ROOT]-(project:TS:Project)-[:CONTAINS]->(module)
+ WITH module
+ ,reverse(split(reverse(projectdir.absoluteFileName), '/')[0]) AS projectName
+ ,toFloat(module.outgoingDependencies) / (module.outgoingDependencies + module.incomingDependencies + 1E-38) as instability
+ ,toFloat(module.outgoingDependentAbstractTypes) / (module.outgoingDependentAbstractTypes + module.incomingDependentAbstractTypes + 1E-38) as instabilityAbstractTypes
+ ,toFloat(module.outgoingDependentModules) / (module.outgoingDependentModules + module.incomingDependentModules + 1E-38) as instabilityModules
+ ,toFloat(module.outgoingDependentPackages) / (module.outgoingDependentPackages + module.incomingDependentPackages + 1E-38) as instabilityPackages
+ SET module.instability = instability
+ ,module.instabilityAbstractTypes = instabilityAbstractTypes
+ ,module.instabilityModules = instabilityModules
+ ,module.instabilityPackages = instabilityPackages
+ WITH module
+ ,projectName
+ ,instability
+ ,instabilityAbstractTypes
+ ,instabilityModules
+ ,instabilityPackages
+WHERE module.incomingDependencies IS NOT NULL
+ AND module.outgoingDependencies IS NOT NULL
+RETURN DISTINCT
+ projectName
+ ,module.globalFqn AS fullQualifiedModuleName
+ ,module.name AS moduleName
+ ,instability
+ ,instabilityAbstractTypes
+ ,instabilityModules
+ ,instabilityPackages
+ ,module.outgoingDependencies, module.incomingDependencies
+ ,module.outgoingDependentAbstractTypes, module.incomingDependentAbstractTypes
+ ,module.outgoingDependentModules, module.incomingDependentModules
+ ,module.outgoingDependentPackages, module.incomingDependentPackages
+ORDER BY instability ASC, fullQualifiedModuleName ASC
\ No newline at end of file
diff --git a/cypher/Metrics/Calculate_distance_between_abstractness_and_instability.cypher b/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Java.cypher
similarity index 55%
rename from cypher/Metrics/Calculate_distance_between_abstractness_and_instability.cypher
rename to cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Java.cypher
index ec9b83f0b..7ac784b84 100644
--- a/cypher/Metrics/Calculate_distance_between_abstractness_and_instability.cypher
+++ b/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Java.cypher
@@ -1,11 +1,11 @@
// Calculate distance between abstractness and instability
-MATCH (artifact:Artifact)-[:CONTAINS]->(package:Package)
+MATCH (artifact:Artifact)-[:CONTAINS]->(package:Java:Package)
RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
- ,package.fqn AS fullQualifiedPackageName
- ,package.name AS packageName
+ ,package.fqn AS fullQualifiedName
+ ,package.name AS name
,abs(package.abstractness + package.instability - 1) AS distance
,package.abstractness AS abstractness
,package.instability AS instability
- ,package.numberOfTypes AS typesInPackage
- ORDER BY distance DESC, typesInPackage DESC
\ No newline at end of file
+ ,package.numberOfTypes AS elementsCount
+ ORDER BY distance DESC, elementsCount DESC
\ No newline at end of file
diff --git a/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_including_subpackages.cypher b/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Java_including_subpackages.cypher
similarity index 68%
rename from cypher/Metrics/Calculate_distance_between_abstractness_and_instability_including_subpackages.cypher
rename to cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Java_including_subpackages.cypher
index a38138582..a4dad3084 100644
--- a/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_including_subpackages.cypher
+++ b/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Java_including_subpackages.cypher
@@ -1,14 +1,14 @@
// Calculate distance between abstractness and instability including subpackages
- MATCH (artifact:Artifact)-[:CONTAINS]->(package:Package)
+ MATCH (artifact:Artifact)-[:CONTAINS]->(package:Java:Package)
WHERE package.abstractnessIncludingSubpackages IS NOT NULL
AND package.instabilityIncludingSubpackages IS NOT NULL
AND package.numberOfTypesIncludingSubpackages IS NOT NULL
RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
- ,package.fqn AS fullQualifiedPackageName
- ,package.name AS packageName
+ ,package.fqn AS fullQualifiedName
+ ,package.name AS name
,abs(package.abstractnessIncludingSubpackages + package.instabilityIncludingSubpackages - 1) AS distance
,package.abstractnessIncludingSubpackages AS abstractness
,package.instabilityIncludingSubpackages AS instability
- ,package.numberOfTypesIncludingSubpackages AS typesInPackage
- ORDER BY distance DESC, typesInPackage DESC
\ No newline at end of file
+ ,package.numberOfTypesIncludingSubpackages AS elementsCount
+ ORDER BY distance DESC, elementsCount DESC
\ No newline at end of file
diff --git a/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Typescript.cypher b/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Typescript.cypher
new file mode 100644
index 000000000..576d7f25d
--- /dev/null
+++ b/cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Typescript.cypher
@@ -0,0 +1,12 @@
+// Calculate distance between abstractness and instability for Typescript
+
+MATCH (module:TS:Module)
+OPTIONAL MATCH (projectDirectory:Directory)<-[:HAS_ROOT]-(project:TS:Project)-[:CONTAINS]->(module)
+RETURN reverse(split(reverse(projectDirectory.absoluteFileName), '/')[0]) AS artifactName
+ ,module.globalFqn AS fullQualifiedName
+ ,module.localFqn AS name
+ ,abs(module.abstractness + module.instability - 1) AS distance
+ ,module.abstractness AS abstractness
+ ,module.instability AS instability
+ ,module.numberOfTypes AS elementsCount
+ ORDER BY distance DESC, elementsCount DESC
\ No newline at end of file
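As a quick illustration of the distance metric used above (a sketch with assumed values, not results from an analyzed codebase): a module with abstractness 0.2 and instability 0.3 lies 0.5 away from the main sequence abstractness + instability = 1.

// Illustrative Cypher only, using assumed metric values
RETURN abs(0.2 + 0.3 - 1) AS exampleDistance // 0.5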
diff --git a/cypher/Metrics/Get_Abstractness_for_Java.cypher b/cypher/Metrics/Get_Abstractness_for_Java.cypher
new file mode 100644
index 000000000..07e74ecf7
--- /dev/null
+++ b/cypher/Metrics/Get_Abstractness_for_Java.cypher
@@ -0,0 +1,12 @@
+// Get Java Packages with the lowest abstractness first (if set before)
+
+MATCH (package:Java:Package)
+WHERE package.abstractness IS NOT NULL
+MATCH (artifact:Artifact)-[:CONTAINS]->(package)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,package.fqn AS fullQualifiedPackageName
+ ,package.name AS packageName
+ ,package.abstractness AS abstractness
+ ,package.numberOfAbstractTypes AS numberAbstractTypes
+ ,package.numberOfTypes AS numberTypes
+ORDER BY abstractness ASC, numberTypes DESC
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Abstractness_for_Java_including_Subpackages.cypher b/cypher/Metrics/Get_Abstractness_for_Java_including_Subpackages.cypher
new file mode 100644
index 000000000..bdf6b1e7c
--- /dev/null
+++ b/cypher/Metrics/Get_Abstractness_for_Java_including_Subpackages.cypher
@@ -0,0 +1,13 @@
+// Get Java Packages including their sub packages with the lowest abstractness first (if set before)
+
+ MATCH path = (package:Java:Package)-[:CONTAINS*0..]->(subpackage:Java:Package)
+ WHERE package.abstractnessIncludingSubpackages IS NOT NULL
+ MATCH (artifact:Artifact)-[:CONTAINS]->(package)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,package.fqn AS fullQualifiedPackageName
+ ,package.name AS packageName
+ ,package.abstractnessIncludingSubpackages AS abstractness
+ ,package.numberOfAbstractTypesIncludingSubpackages AS numberAbstractTypes
+ ,package.numberOfTypesIncludingSubpackages AS numberTypes
+ ,max(length(path)) AS maxSubpackageDepth
+ORDER BY abstractness ASC, maxSubpackageDepth DESC, numberTypes DESC
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Abstractness_for_Typescript.cypher b/cypher/Metrics/Get_Abstractness_for_Typescript.cypher
new file mode 100644
index 000000000..4b65f48d1
--- /dev/null
+++ b/cypher/Metrics/Get_Abstractness_for_Typescript.cypher
@@ -0,0 +1,12 @@
+// Get Typescript Modules with the lowest abstractness first (if set before)
+
+MATCH (module:TS:Module)
+WHERE module.abstractness IS NOT NULL
+OPTIONAL MATCH (projectdir:Directory)<-[:HAS_ROOT]-(project:TS:Project)-[:CONTAINS]->(module)
+RETURN reverse(split(reverse(projectdir.absoluteFileName), '/')[0]) AS projectName
+ ,module.globalFqn AS fullQualifiedModuleName
+ ,module.name AS moduleName
+ ,module.abstractness AS abstractness
+ ,module.numberAbstractTypes AS numberAbstractTypes
+ ,module.numberTypes AS numberTypes
+ORDER BY abstractness ASC, numberTypes DESC
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Incoming_Java_Package_Dependencies.cypher b/cypher/Metrics/Get_Incoming_Java_Package_Dependencies.cypher
new file mode 100644
index 000000000..60b95a2aa
--- /dev/null
+++ b/cypher/Metrics/Get_Incoming_Java_Package_Dependencies.cypher
@@ -0,0 +1,15 @@
+// Get Java Packages with the most incoming dependencies first (if set before)
+
+MATCH (p:Java:Package)
+WHERE p.incomingDependencies IS NOT NULL
+MATCH (artifact:Artifact)-[:CONTAINS]->(p)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
+ ,p.incomingDependencies AS incomingDependencies
+ ,p.incomingDependenciesWeight AS incomingDependenciesWeight
+ ,p.incomingDependentTypes AS incomingDependentTypes
+ ,p.incomingDependentInterfaces AS incomingDependentInterfaces
+ ,p.incomingDependentPackages AS incomingDependentPackages
+ ,p.incomingDependentArtifacts AS incomingDependentArtifacts
+ORDER BY incomingDependencies DESC, p.fqn ASC // package with most incoming dependencies first
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher b/cypher/Metrics/Get_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher
new file mode 100644
index 000000000..9d3b3cce3
--- /dev/null
+++ b/cypher/Metrics/Get_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher
@@ -0,0 +1,15 @@
+// Get Java Packages including their sub-packages with the most incoming dependencies first (if set before)
+
+MATCH (p:Java:Package)
+WHERE p.incomingDependenciesIncludingSubpackages IS NOT NULL
+MATCH (artifact:Artifact)-[:CONTAINS]->(p)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
+ ,p.incomingDependenciesIncludingSubpackages AS incomingDependencies
+ ,p.incomingDependenciesWeightIncludingSubpackages AS incomingDependenciesWeight
+ ,p.incomingDependentTypesIncludingSubpackages AS incomingDependentTypes
+ ,p.incomingDependentInterfacesIncludingSubpackages AS incomingDependentInterfaces
+ ,p.incomingDependentPackagesIncludingSubpackages AS incomingDependentPackages
+ ,p.incomingDependentArtifactsIncludingSubpackages AS incomingDependentArtifacts
+ORDER BY incomingDependencies DESC, p.fqn ASC // package with most incoming dependencies first
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Incoming_Typescript_Module_Dependencies.cypher b/cypher/Metrics/Get_Incoming_Typescript_Module_Dependencies.cypher
new file mode 100644
index 000000000..2a2f4a93a
--- /dev/null
+++ b/cypher/Metrics/Get_Incoming_Typescript_Module_Dependencies.cypher
@@ -0,0 +1,13 @@
+// Get Typescript Modules with the most incoming dependencies first (if set before)
+
+ MATCH (module:TS:Module)
+ WHERE module.incomingDependencies IS NOT NULL
+RETURN module.globalFqn AS fullQualifiedModuleName
+ ,module.name AS moduleName
+ ,module.incomingDependencies AS incomingDependencies
+ ,module.incomingDependenciesWeight AS incomingDependenciesWeight
+ ,module.incomingDependentAbstractTypes AS incomingDependentAbstractTypes
+ ,module.incomingDependentAbstractTypeWeight AS incomingDependentAbstractTypeWeight
+ ,module.incomingDependentModules AS incomingDependentModules
+ ,module.incomingDependentPackages AS incomingDependentPackages
+ORDER BY incomingDependencies DESC, fullQualifiedModuleName ASC
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Instability_for_Java.cypher b/cypher/Metrics/Get_Instability_for_Java.cypher
new file mode 100644
index 000000000..c84088d94
--- /dev/null
+++ b/cypher/Metrics/Get_Instability_for_Java.cypher
@@ -0,0 +1,20 @@
+// Get Java Packages with the lowest Instability (outgoing / all dependencies) first (if set before)
+// Instability = outgoing / (outgoing + incoming) Dependencies
+
+ MATCH (p:Java:Package)
+ WHERE p.instability IS NOT NULL
+MATCH (artifact:Artifact)-[:CONTAINS]->(p)
+ RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
+ ,p.instability AS instability
+ ,p.instabilityTypes AS instabilityTypes
+ ,p.instabilityInterfaces AS instabilityInterfaces
+ ,p.instabilityPackages AS instabilityPackages
+ ,p.instabilityArtifacts AS instabilityArtifacts
+ ,p.outgoingDependencies, p.incomingDependencies
+ ,p.outgoingDependentTypes, p.incomingDependentTypes
+ ,p.outgoingDependentInterfaces, p.incomingDependentInterfaces
+ ,p.outgoingDependentPackages, p.incomingDependentPackages
+ ,p.outgoingDependentArtifacts, p.incomingDependentArtifacts
+ORDER BY instability ASC, p.fqn ASC
\ No newline at end of file
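The instability comment above can be made concrete with a small sketch (assumed dependency counts, not actual query results): a package with 3 outgoing and 1 incoming dependency gets instability 3 / (3 + 1) = 0.75, so it depends on others far more than others depend on it.

// Illustrative Cypher only: instability = outgoing / (outgoing + incoming)
WITH 3 AS outgoing, 1 AS incoming
RETURN toFloat(outgoing) / (outgoing + incoming) AS exampleInstability // 0.75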
diff --git a/cypher/Metrics/Get_Instability_for_Java_Including_Subpackages.cypher b/cypher/Metrics/Get_Instability_for_Java_Including_Subpackages.cypher
new file mode 100644
index 000000000..296be6fe6
--- /dev/null
+++ b/cypher/Metrics/Get_Instability_for_Java_Including_Subpackages.cypher
@@ -0,0 +1,20 @@
+// Get Java Packages including their sub packages with the lowest Instability first (if set before)
+// Instability = outgoing / (outgoing + incoming) Dependencies
+
+ MATCH (p:Java:Package)
+ WHERE p.instabilityIncludingSubpackages IS NOT NULL
+ MATCH (artifact:Artifact)-[:CONTAINS]->(p)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
+ ,p.instabilityIncludingSubpackages AS instability
+ ,p.instabilityTypesIncludingSubpackages AS instabilityTypes
+ ,p.instabilityInterfacesIncludingSubpackages AS instabilityInterfaces
+ ,p.instabilityPackagesIncludingSubpackages AS instabilityPackages
+ ,p.instabilityArtifactsIncludingSubpackages AS instabilityArtifacts
+ ,p.outgoingDependenciesIncludingSubpackages, p.incomingDependenciesIncludingSubpackages
+ ,p.outgoingDependentTypesIncludingSubpackages, p.incomingDependentTypesIncludingSubpackages
+ ,p.outgoingDependentInterfacesIncludingSubpackages, p.incomingDependentInterfacesIncludingSubpackages
+ ,p.outgoingDependentPackagesIncludingSubpackages, p.incomingDependentPackagesIncludingSubpackages
+ ,p.outgoingDependentArtifactsIncludingSubpackages, p.incomingDependentArtifactsIncludingSubpackages
+ORDER BY instability ASC, p.fqn ASC
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Instability_for_Typescript.cypher b/cypher/Metrics/Get_Instability_for_Typescript.cypher
new file mode 100644
index 000000000..c8b21d2c7
--- /dev/null
+++ b/cypher/Metrics/Get_Instability_for_Typescript.cypher
@@ -0,0 +1,17 @@
+// Get Typescript Modules with the lowest Instability (outgoing / all dependencies) first (if set before)
+
+ MATCH (module:TS:Module)
+ WHERE module.instability IS NOT NULL
+OPTIONAL MATCH (projectdir:Directory)<-[:HAS_ROOT]-(project:TS:Project)-[:CONTAINS]->(module)
+RETURN reverse(split(reverse(projectdir.absoluteFileName), '/')[0]) AS projectName
+ ,module.globalFqn AS fullQualifiedModuleName
+ ,module.name AS moduleName
+ ,module.instability AS instability
+ ,module.instabilityAbstractTypes AS instabilityAbstractTypes
+ ,module.instabilityModules AS instabilityModules
+ ,module.instabilityPackages AS instabilityPackages
+ ,module.outgoingDependencies, module.incomingDependencies
+ ,module.outgoingDependentAbstractTypes, module.incomingDependentAbstractTypes
+ ,module.outgoingDependentModules, module.incomingDependentModules
+ ,module.outgoingDependentPackages, module.incomingDependentPackages
+ORDER BY instability ASC, fullQualifiedModuleName ASC
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Outgoing_Java_Package_Dependencies.cypher b/cypher/Metrics/Get_Outgoing_Java_Package_Dependencies.cypher
new file mode 100644
index 000000000..c3f7c1c98
--- /dev/null
+++ b/cypher/Metrics/Get_Outgoing_Java_Package_Dependencies.cypher
@@ -0,0 +1,15 @@
+// Get Java Packages with the most outgoing dependencies first (if set before)
+
+MATCH (p:Java:Package)
+WHERE p.outgoingDependencies IS NOT NULL
+MATCH (artifact:Artifact)-[:CONTAINS]->(p)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
+ ,p.outgoingDependencies AS outgoingDependencies
+ ,p.outgoingDependenciesWeight AS outgoingDependenciesWeight
+ ,p.outgoingDependentTypes AS outgoingDependentTypes
+ ,p.outgoingDependentInterfaces AS outgoingDependentInterfaces
+ ,p.outgoingDependentPackages AS outgoingDependentPackages
+ ,p.outgoingDependentArtifacts AS outgoingDependentArtifacts
+ORDER BY outgoingDependencies DESC, p.fqn ASC // package with most outgoing dependencies first
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher b/cypher/Metrics/Get_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher
new file mode 100644
index 000000000..1066549e1
--- /dev/null
+++ b/cypher/Metrics/Get_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher
@@ -0,0 +1,15 @@
+// Get Java Packages including their sub packages with the most outgoing dependencies first (if set before)
+
+MATCH (p:Java:Package)
+WHERE p.outgoingDependenciesIncludingSubpackages IS NOT NULL
+MATCH (artifact:Artifact)-[:CONTAINS]->(p)
+RETURN replace(last(split(artifact.fileName, '/')), '.jar', '') AS artifactName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
+ ,p.outgoingDependenciesIncludingSubpackages AS outgoingDependencies
+ ,p.outgoingDependenciesWeightIncludingSubpackages AS outgoingDependenciesWeight
+ ,p.outgoingDependentTypesIncludingSubpackages AS outgoingDependentTypes
+ ,p.outgoingDependentInterfacesIncludingSubpackages AS outgoingDependentInterfaces
+ ,p.outgoingDependentPackagesIncludingSubpackages AS outgoingDependentPackages
+ ,p.outgoingDependentArtifactsIncludingSubpackages AS outgoingDependentArtifacts
+ORDER BY outgoingDependencies DESC, p.fqn ASC // package with most outgoing dependencies first
\ No newline at end of file
diff --git a/cypher/Metrics/Get_Outgoing_Typescript_Module_Dependencies.cypher b/cypher/Metrics/Get_Outgoing_Typescript_Module_Dependencies.cypher
new file mode 100644
index 000000000..2fc0925c3
--- /dev/null
+++ b/cypher/Metrics/Get_Outgoing_Typescript_Module_Dependencies.cypher
@@ -0,0 +1,13 @@
+// Get Typescript Modules with the most outgoing dependencies first (if set before)
+
+ MATCH (module:TS:Module)
+ WHERE module.outgoingDependencies IS NOT NULL
+RETURN module.globalFqn AS fullQualifiedModuleName
+ ,module.name AS sourceName
+ ,module.outgoingDependencies AS outgoingDependencies
+ ,module.outgoingDependenciesWeight AS outgoingDependenciesWeight
+ ,module.outgoingDependentAbstractTypes AS outgoingDependentAbstractTypes
+ ,module.outgoingDependentAbstractTypeWeight AS outgoingDependentAbstractTypeWeight
+ ,module.outgoingDependentModules AS outgoingDependentModules
+ ,module.outgoingDependentPackages AS outgoingDependentPackages
+ORDER BY outgoingDependencies DESC, fullQualifiedModuleName ASC
\ No newline at end of file
diff --git a/cypher/Metrics/Set_Incoming_Package_Dependencies.cypher b/cypher/Metrics/Set_Incoming_Java_Package_Dependencies.cypher
similarity index 94%
rename from cypher/Metrics/Set_Incoming_Package_Dependencies.cypher
rename to cypher/Metrics/Set_Incoming_Java_Package_Dependencies.cypher
index 9ecea8567..39ad64019 100644
--- a/cypher/Metrics/Set_Incoming_Package_Dependencies.cypher
+++ b/cypher/Metrics/Set_Incoming_Java_Package_Dependencies.cypher
@@ -1,6 +1,6 @@
// Set Incoming Package Dependencies
- MATCH (p:Package)
+ MATCH (p:Java:Package)
MATCH (artifact:Artifact)-[:CONTAINS]->(p)
OPTIONAL MATCH (p)-[:CONTAINS]->(it:Java:Type)<-[r:DEPENDS_ON]-(et:Java:Type)<-[:CONTAINS]-(ep:Package)<-[:CONTAINS]-(ea:Artifact)
OPTIONAL MATCH (it)<-[:DEPENDS_ON]-(eti:Java:Type:Interface)
@@ -23,7 +23,8 @@ ORDER BY incomingDependencies DESC, p.fqn ASC // package with most incoming depe
,p.incomingDependentPackages = incomingDependentPackages
,p.incomingDependentArtifacts = incomingDependentArtifacts
RETURN artifactName
- ,p.fqn AS packageName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
,incomingDependencies
,incomingDependenciesWeight
,incomingDependentTypes
diff --git a/cypher/Metrics/Set_Incoming_Package_Dependencies_Including_Subpackages.cypher b/cypher/Metrics/Set_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher
similarity index 88%
rename from cypher/Metrics/Set_Incoming_Package_Dependencies_Including_Subpackages.cypher
rename to cypher/Metrics/Set_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher
index e72694371..9e274b150 100644
--- a/cypher/Metrics/Set_Incoming_Package_Dependencies_Including_Subpackages.cypher
+++ b/cypher/Metrics/Set_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher
@@ -1,6 +1,6 @@
// Set Incoming Package Dependencies including sub-packages
- MATCH (p:Package)
+ MATCH (p:Java:Package)
WITH *
,EXISTS{
MATCH (p)<-[:CONTAINS]-(ancestor:Package)-[:CONTAINS]->(sibling:Package)
@@ -9,7 +9,7 @@
,EXISTS{(p)-[:CONTAINS]->(:Type)} AS containsTypes
WHERE hasSiblingPackages OR containsTypes
MATCH (artifact:Artifact)-[:CONTAINS]->(p)
-OPTIONAL MATCH (p:Package)-[:CONTAINS*0..]->(sp:Package)-[:CONTAINS]->(st:Java:Type)<-[r:DEPENDS_ON]-(et:Java:Type)<-[:CONTAINS]-(ep:Package)<-[:CONTAINS]-(ea:Artifact)
+OPTIONAL MATCH (p)-[:CONTAINS*0..]->(sp:Package)-[:CONTAINS]->(st:Java:Type)<-[r:DEPENDS_ON]-(et:Java:Type)<-[:CONTAINS]-(ep:Package)<-[:CONTAINS]-(ea:Artifact)
WHERE NOT ep.fqn starts with p.fqn + '.'
AND ep.fqn <> p.fqn
// AND p.incomingDependenciesIncludingSubpackages IS NULL // comment out to recalculate
@@ -30,7 +30,8 @@ ORDER BY incomingDependencies DESC, p.fqn ASC // package with most incoming depe
,p.incomingDependentPackagesIncludingSubpackages = incomingDependentPackages
,p.incomingDependentArtifactsIncludingSubpackages = incomingDependentArtifacts
RETURN artifactName
- ,p.fqn AS packageName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
,incomingDependencies
,incomingDependenciesWeight
,incomingDependentTypes
diff --git a/cypher/Metrics/Set_Incoming_Package_Method_Call_Dependencies.cypher b/cypher/Metrics/Set_Incoming_Java_Package_Method_Call_Dependencies.cypher
similarity index 98%
rename from cypher/Metrics/Set_Incoming_Package_Method_Call_Dependencies.cypher
rename to cypher/Metrics/Set_Incoming_Java_Package_Method_Call_Dependencies.cypher
index 93ebb23ed..32f1e0c78 100644
--- a/cypher/Metrics/Set_Incoming_Package_Method_Call_Dependencies.cypher
+++ b/cypher/Metrics/Set_Incoming_Java_Package_Method_Call_Dependencies.cypher
@@ -1,5 +1,5 @@
//Set Incoming Package Method Call Dependencies
-MATCH (p:Package)
+MATCH (p:Java:Package)
OPTIONAL MATCH (p)-[:CONTAINS]->(t:Java:Type)-[:DECLARES]->(m:Method)<-[:INVOKES]-(dm:Method)<-[:DECLARES]-(dt:Java:Type)<-[:CONTAINS]-(dp:Package)<-[:CONTAINS]-(da:Artifact)
OPTIONAL MATCH (dm)<-[:DECLARES]-(dti:Interface)
WHERE p <> dp
diff --git a/cypher/Metrics/Set_Incoming_Type_Dependencies.cypher b/cypher/Metrics/Set_Incoming_Java_Type_Dependencies.cypher
similarity index 98%
rename from cypher/Metrics/Set_Incoming_Type_Dependencies.cypher
rename to cypher/Metrics/Set_Incoming_Java_Type_Dependencies.cypher
index 5eb8ee92c..86aa100e0 100644
--- a/cypher/Metrics/Set_Incoming_Type_Dependencies.cypher
+++ b/cypher/Metrics/Set_Incoming_Java_Type_Dependencies.cypher
@@ -1,6 +1,6 @@
// Set Incoming Type Dependencies
- MATCH (p:Package)
+ MATCH (p:Java:Package)
OPTIONAL MATCH (p)-[:CONTAINS]->(it:Java:Type)<-[r:DEPENDS_ON]-(et:Java:Type)<-[:CONTAINS]-(ep:Package)<-[:CONTAINS]-(ea:Artifact)
OPTIONAL MATCH (it)<-[:DEPENDS_ON]-(eti:Type:Interface)
WHERE it <> et
diff --git a/cypher/Metrics/Set_Incoming_Typescript_Module_Dependencies.cypher b/cypher/Metrics/Set_Incoming_Typescript_Module_Dependencies.cypher
new file mode 100644
index 000000000..9d0863cd7
--- /dev/null
+++ b/cypher/Metrics/Set_Incoming_Typescript_Module_Dependencies.cypher
@@ -0,0 +1,37 @@
+// Set incoming Typescript Module dependencies
+
+// Get the top level dependency between a Typescript module and the modules that depend on it (via its resolved ExternalModule)
+ MATCH (source:TS:Module)
+OPTIONAL MATCH (source)<-[:RESOLVES_TO]-(sourceExternal:ExternalModule)<-[moduleDependency:DEPENDS_ON]-(target:TS:Module)
+ WHERE source <> target
+// Get the project of the external module if available
+OPTIONAL MATCH (projectdir:Directory)<-[:HAS_ROOT]-(project:TS:Project)-[:CONTAINS]->(target)
+// Aggregate all gathered information for each (grouped by) source module
+ WITH source
+ ,collect(DISTINCT projectdir.absoluteFileName) AS projectNames
+ ,count(DISTINCT target.globalFqn) AS externalModuleCount
+ ,sum(moduleDependency.declarationCount) AS declarationCount
+ ,sum(moduleDependency.abstractTypeCount) AS abstractTypeCount
+ ,sum(moduleDependency.cardinality) AS totalCardinality
+ ,sum(moduleDependency.abstractTypeCardinality) AS abstractTypeCardinality
+ ,collect(DISTINCT target.globalFqn)[0..4] AS externalModuleExamples
+ SET source.incomingDependencies = declarationCount
+ ,source.incomingDependenciesWeight = totalCardinality
+ ,source.incomingDependentAbstractTypes = abstractTypeCount
+ ,source.incomingDependentAbstractTypeWeight = abstractTypeCardinality
+ ,source.incomingDependentModules = externalModuleCount
+ ,source.incomingDependentPackages = size(projectNames)
+    // Incoming dependency properties can't easily be set on the sourceExternal nodes,
+    // since there might be more than one per source. If this is needed in the future,
+    // make sure that there is no regression for the source nodes.
+RETURN source.globalFqn AS fullQualifiedModuleName
+ ,source.name AS moduleName
+ ,declarationCount AS incomingDependencies
+ ,totalCardinality AS incomingDependenciesWeight
+ ,abstractTypeCount AS incomingDependentAbstractTypes
+ ,abstractTypeCardinality AS incomingDependentAbstractTypeWeight
+ ,externalModuleCount AS incomingDependentModules
+ ,size(projectNames) AS incomingDependentPackages
+ ,externalModuleExamples
+ ,projectNames
+ORDER BY incomingDependencies DESC, fullQualifiedModuleName ASC // modules with most incoming dependencies first
diff --git a/cypher/Metrics/Set_Outgoing_Package_Dependencies.cypher b/cypher/Metrics/Set_Outgoing_Java_Package_Dependencies.cypher
similarity index 94%
rename from cypher/Metrics/Set_Outgoing_Package_Dependencies.cypher
rename to cypher/Metrics/Set_Outgoing_Java_Package_Dependencies.cypher
index 2d658931d..542ac5de9 100644
--- a/cypher/Metrics/Set_Outgoing_Package_Dependencies.cypher
+++ b/cypher/Metrics/Set_Outgoing_Java_Package_Dependencies.cypher
@@ -1,6 +1,6 @@
//Set Outgoing Package Dependencies
- MATCH (p:Package)
+ MATCH (p:Java:Package)
MATCH (artifact:Artifact)-[:CONTAINS]->(p)
OPTIONAL MATCH (p)-[:CONTAINS]->(it:Java:Type)-[r:DEPENDS_ON]->(et:Java:Type)<-[:CONTAINS]-(ep:Package)<-[:CONTAINS]-(ea:Artifact)
OPTIONAL MATCH (it)-[:DEPENDS_ON]->(eti:Java:Type:Interface)
@@ -23,7 +23,8 @@ ORDER BY outgoingDependencies DESC, p.fqn ASC // package with most incoming depe
,p.outgoingDependentPackages = outgoingDependentPackages
,p.outgoingDependentArtifacts = outgoingDependentArtifacts
RETURN artifactName
- ,p.fqn AS packageName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
,outgoingDependencies
,outgoingDependenciesWeight
,outgoingDependentTypes
diff --git a/cypher/Metrics/Set_Outgoing_Package_Dependencies_Including_Subpackages.cypher b/cypher/Metrics/Set_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher
similarity index 88%
rename from cypher/Metrics/Set_Outgoing_Package_Dependencies_Including_Subpackages.cypher
rename to cypher/Metrics/Set_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher
index 930fda7ef..f8100c9df 100644
--- a/cypher/Metrics/Set_Outgoing_Package_Dependencies_Including_Subpackages.cypher
+++ b/cypher/Metrics/Set_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher
@@ -1,6 +1,6 @@
// Set Outgoing Package Dependencies including sub-packages
- MATCH (p:Package)
+ MATCH (p:Java:Package)
WITH *
,EXISTS{
MATCH (p)<-[:CONTAINS]-(ancestor:Package)-[:CONTAINS]->(sibling:Package)
@@ -9,7 +9,7 @@
,EXISTS{(p)-[:CONTAINS]->(:Type)} AS containsTypes
WHERE hasSiblingPackages OR containsTypes
MATCH (artifact:Artifact)-[:CONTAINS]->(p)
-OPTIONAL MATCH (p:Package)-[:CONTAINS*0..]->(sp:Package)-[:CONTAINS]->(st:Java:Type)-[r:DEPENDS_ON]->(et:Java:Type)<-[:CONTAINS]-(ep:Package)<-[:CONTAINS]-(ea:Artifact)
+OPTIONAL MATCH (p)-[:CONTAINS*0..]->(sp:Package)-[:CONTAINS]->(st:Java:Type)-[r:DEPENDS_ON]->(et:Java:Type)<-[:CONTAINS]-(ep:Package)<-[:CONTAINS]-(ea:Artifact)
WHERE NOT ep.fqn starts with p.fqn + '.'
AND ep.fqn <> p.fqn
// AND p.outgoingDependenciesIncludingSubpackages IS NULL // comment out to recalculate
@@ -30,7 +30,8 @@ ORDER BY outgoingDependencies DESC, p.fqn ASC // package with most incoming depe
,p.outgoingDependentPackagesIncludingSubpackages = outgoingDependentPackages
,p.outgoingDependentArtifactsIncludingSubpackages = outgoingDependentArtifacts
RETURN artifactName
- ,p.fqn AS packageName
+ ,p.fqn AS fullQualifiedPackageName
+ ,p.name AS packageName
,outgoingDependencies
,outgoingDependenciesWeight
,outgoingDependentTypes
diff --git a/cypher/Metrics/Set_Outgoing_Package_Method_Call_Dependencies.cypher b/cypher/Metrics/Set_Outgoing_Java_Package_Method_Call_Dependencies.cypher
similarity index 98%
rename from cypher/Metrics/Set_Outgoing_Package_Method_Call_Dependencies.cypher
rename to cypher/Metrics/Set_Outgoing_Java_Package_Method_Call_Dependencies.cypher
index 5f98e24c8..4391b2913 100644
--- a/cypher/Metrics/Set_Outgoing_Package_Method_Call_Dependencies.cypher
+++ b/cypher/Metrics/Set_Outgoing_Java_Package_Method_Call_Dependencies.cypher
@@ -1,5 +1,6 @@
//Set Outgoing Package Method Call Dependencies
-MATCH (p:Package)
+
+MATCH (p:Java:Package)
OPTIONAL MATCH (p)-[:CONTAINS]->(t:Java:Type)-[:DECLARES]->(m:Method)-[:INVOKES]->(dm:Method)<-[:DECLARES]-(dt:Java:Type)<-[:CONTAINS]-(dp:Package)<-[:CONTAINS]-(da:Artifact)
OPTIONAL MATCH (dm)<-[:DECLARES]-(dti:Interface)
WHERE p <> dp
diff --git a/cypher/Metrics/Set_Outgoing_Type_Dependencies.cypher b/cypher/Metrics/Set_Outgoing_Java_Type_Dependencies.cypher
similarity index 98%
rename from cypher/Metrics/Set_Outgoing_Type_Dependencies.cypher
rename to cypher/Metrics/Set_Outgoing_Java_Type_Dependencies.cypher
index afbe1210a..0bdcd14d9 100644
--- a/cypher/Metrics/Set_Outgoing_Type_Dependencies.cypher
+++ b/cypher/Metrics/Set_Outgoing_Java_Type_Dependencies.cypher
@@ -1,6 +1,6 @@
//Set Outgoing Type Dependencies
- MATCH (p:Package)
+ MATCH (p:Java:Package)
OPTIONAL MATCH (p)-[:CONTAINS]->(it:Java:Type)-[r:DEPENDS_ON]->(et:Java:Type)<-[:CONTAINS]-(ep:Package)<-[:CONTAINS]-(ea:Artifact)
OPTIONAL MATCH (it)-[:DEPENDS_ON]->(eti:Type:Interface)
WHERE it <> et
diff --git a/cypher/Metrics/Set_Outgoing_Typescript_Module_Dependencies.cypher b/cypher/Metrics/Set_Outgoing_Typescript_Module_Dependencies.cypher
new file mode 100644
index 000000000..cbab604d1
--- /dev/null
+++ b/cypher/Metrics/Set_Outgoing_Typescript_Module_Dependencies.cypher
@@ -0,0 +1,34 @@
+// Set outgoing Typescript Module dependencies
+
+// Get the top level dependency between a Typescript module and the external modules it uses
+ MATCH (source:TS:Module)
+OPTIONAL MATCH (source)-[moduleDependency:DEPENDS_ON]->(target:ExternalModule)
+ WHERE NOT EXISTS {(target)-[:RESOLVES_TO]->(source)}
+// Get the project of the external module if available
+OPTIONAL MATCH (projectdir:Directory)<-[:HAS_ROOT]-(project:TS:Project)-[:CONTAINS]->(:TS:Module)<-[:RESOLVES_TO]-(target)
+// Aggregate all gathered information for each (grouped by) source module
+ WITH source
+ ,collect(DISTINCT projectdir.absoluteFileName) AS projectNames
+ ,count(DISTINCT target.globalFqn) AS externalModuleCount
+ ,sum(moduleDependency.declarationCount) AS declarationCount
+ ,sum(moduleDependency.abstractTypeCount) AS abstractTypeCount
+ ,sum(moduleDependency.cardinality) AS totalCardinality
+ ,sum(moduleDependency.abstractTypeCardinality) AS abstractTypeCardinality
+ ,collect(DISTINCT target.globalFqn)[0..4] AS externalModuleExamples
+ SET source.outgoingDependencies = declarationCount
+ ,source.outgoingDependenciesWeight = totalCardinality
+ ,source.outgoingDependentAbstractTypes = abstractTypeCount
+ ,source.outgoingDependentAbstractTypeWeight = abstractTypeCardinality
+ ,source.outgoingDependentModules = externalModuleCount
+ ,source.outgoingDependentPackages = size(projectNames)
+RETURN source.globalFqn AS fullQualifiedModuleName
+ ,source.name AS moduleName
+ ,declarationCount AS outgoingDependencies
+ ,totalCardinality AS outgoingDependenciesWeight
+ ,abstractTypeCount AS outgoingDependentAbstractTypes
+ ,abstractTypeCardinality AS outgoingDependentAbstractTypeWeight
+ ,externalModuleCount AS outgoingDependentModules
+ ,size(projectNames) AS outgoingDependentPackages
+ ,externalModuleExamples
+ ,projectNames
+ORDER BY outgoingDependencies DESC, fullQualifiedModuleName ASC
diff --git a/cypher/Adding_the_artifact_name_temporarily_to_a_new_virtual_node_using_APOC.cypher b/cypher/Miscellaneous/Adding_the_artifact_name_temporarily_to_a_new_virtual_node_using_APOC.cypher
similarity index 100%
rename from cypher/Adding_the_artifact_name_temporarily_to_a_new_virtual_node_using_APOC.cypher
rename to cypher/Miscellaneous/Adding_the_artifact_name_temporarily_to_a_new_virtual_node_using_APOC.cypher
diff --git a/cypher/Adding_the_artifact_name_temporarily_to_the_Package_node_using_map_projection.cypher b/cypher/Miscellaneous/Adding_the_artifact_name_temporarily_to_the_Package_node_using_map_projection.cypher
similarity index 100%
rename from cypher/Adding_the_artifact_name_temporarily_to_the_Package_node_using_map_projection.cypher
rename to cypher/Miscellaneous/Adding_the_artifact_name_temporarily_to_the_Package_node_using_map_projection.cypher
diff --git a/cypher/Extract_Custom_Manifest_Entries.cypher b/cypher/Miscellaneous/Extract_Custom_Manifest_Entries.cypher
similarity index 100%
rename from cypher/Extract_Custom_Manifest_Entries.cypher
rename to cypher/Miscellaneous/Extract_Custom_Manifest_Entries.cypher
diff --git a/cypher/Get_Awesome_Procedures_On_Cypher_APOC_Version.cypher b/cypher/Miscellaneous/Get_Awesome_Procedures_On_Cypher_APOC_Version.cypher
similarity index 100%
rename from cypher/Get_Awesome_Procedures_On_Cypher_APOC_Version.cypher
rename to cypher/Miscellaneous/Get_Awesome_Procedures_On_Cypher_APOC_Version.cypher
diff --git a/cypher/Get_Graph_Data_Science_Library_Version.cypher b/cypher/Miscellaneous/Get_Graph_Data_Science_Library_Version.cypher
similarity index 100%
rename from cypher/Get_Graph_Data_Science_Library_Version.cypher
rename to cypher/Miscellaneous/Get_Graph_Data_Science_Library_Version.cypher
diff --git a/cypher/Get_Graph_Data_Science_System_Information.cypher b/cypher/Miscellaneous/Get_Graph_Data_Science_System_Information.cypher
similarity index 100%
rename from cypher/Get_Graph_Data_Science_System_Information.cypher
rename to cypher/Miscellaneous/Get_Graph_Data_Science_System_Information.cypher
diff --git a/cypher/Set_artifactName_property_on_every_Package_node.cypher b/cypher/Miscellaneous/Set_artifactName_property_on_every_Package_node.cypher
similarity index 100%
rename from cypher/Set_artifactName_property_on_every_Package_node.cypher
rename to cypher/Miscellaneous/Set_artifactName_property_on_every_Package_node.cypher
diff --git a/cypher/Overview/Words_for_universal_Wordcloud.cypher b/cypher/Overview/Words_for_universal_Wordcloud.cypher
new file mode 100644
index 000000000..914635829
--- /dev/null
+++ b/cypher/Overview/Words_for_universal_Wordcloud.cypher
@@ -0,0 +1,17 @@
+// Words for universal Wordcloud
+
+MATCH (named:!Key&!Primitive&!PrimitiveType&!Void&!JavaType&!ResolvedDuplicateType&!ExternalType)
+WHERE named.name > ''
+ AND named.name <> 'package-info'
+ AND named.name <> ''
+ AND named.name <> ''
+ WITH apoc.text.replace(named.name, '(? 1
+RETURN word
+// ,count(*) as numberOfAppearances
+//ORDER BY numberOfAppearances DESC, word
\ No newline at end of file
diff --git a/cypher/Typescript_Enrichment/Add_DEPENDS_ON_relationship_to_resolved_modules.cypher b/cypher/Typescript_Enrichment/Add_DEPENDS_ON_relationship_to_resolved_modules.cypher
new file mode 100644
index 000000000..c607c8885
--- /dev/null
+++ b/cypher/Typescript_Enrichment/Add_DEPENDS_ON_relationship_to_resolved_modules.cypher
@@ -0,0 +1,12 @@
+// Propagates "DEPENDS_ON" relations between modules to their resolved modules with a property "resolved:true".
+// Inspired by https://github.com/jQAssistant/jqa-java-plugin/blob/f092122b62bb13d597840b64b73b2010bd074d1f/src/main/resources/META-INF/jqassistant-rules/java-classpath.xml#L59
+
+ MATCH (module:TS:Module)-[dependsOn:DEPENDS_ON]->(externalModule:TS:ExternalModule)
+ MATCH (externalModule)-[:RESOLVES_TO]->(resolvedModule:TS:Module)
+ WHERE module <> resolvedModule
+ CALL { WITH module, dependsOn, resolvedModule
+ MERGE (module)-[resolvedDependsOn:DEPENDS_ON]->(resolvedModule)
+ SET resolvedDependsOn = dependsOn
+ ,resolvedDependsOn.resolved=true
+ } IN TRANSACTIONS
+RETURN count(*) as resolvedDependencies
\ No newline at end of file
diff --git a/cypher/Typescript_Enrichment/Add_RESOLVES_TO_relationship_for_matching_declarations.cypher b/cypher/Typescript_Enrichment/Add_RESOLVES_TO_relationship_for_matching_declarations.cypher
new file mode 100644
index 000000000..32757c8f5
--- /dev/null
+++ b/cypher/Typescript_Enrichment/Add_RESOLVES_TO_relationship_for_matching_declarations.cypher
@@ -0,0 +1,12 @@
+// Adds a relation "RESOLVES_TO" from a Typescript element to an external declaration if their global fully qualified names match.
+// Inspired by https://github.com/jQAssistant/jqa-java-plugin/blob/f092122b62bb13d597840b64b73b2010bd074d1f/src/main/resources/META-INF/jqassistant-rules/java-classpath.xml#L5
+
+MATCH (element:TS&!ExternalDeclaration)
+MATCH (external:TS&ExternalDeclaration)
+WHERE (element.globalFqn = external.globalFqn
+ OR toLower(element.globalFqn) = toLower(external.globalFqn))
+ AND element <> external
+ CALL { WITH element, external
+ MERGE (external)-[:RESOLVES_TO]->(element)
+ } IN TRANSACTIONS
+RETURN count(*) AS resolvedElements
\ No newline at end of file
diff --git a/cypher/Typescript_Enrichment/Add_RESOLVES_TO_relationship_for_matching_modules.cypher b/cypher/Typescript_Enrichment/Add_RESOLVES_TO_relationship_for_matching_modules.cypher
new file mode 100644
index 000000000..63df300f4
--- /dev/null
+++ b/cypher/Typescript_Enrichment/Add_RESOLVES_TO_relationship_for_matching_modules.cypher
@@ -0,0 +1,14 @@
+// Adds a relation "RESOLVES_TO" from an external module to a module if their global fully qualified names match.
+// Inspired by https://github.com/jQAssistant/jqa-java-plugin/blob/f092122b62bb13d597840b64b73b2010bd074d1f/src/main/resources/META-INF/jqassistant-rules/java-classpath.xml#L5
+
+MATCH (module:TS:Module)
+MATCH (externalModule:TS:ExternalModule)
+WHERE (toLower(module.globalFqn) = toLower(externalModule.globalFqn)
+ OR toLower(module.globalFqn) = split(toLower(externalModule.globalFqn), '/index.')[0]
+ OR toLower(externalModule.globalFqn) = split(toLower(module.globalFqn), '/index.')[0]
+ )
+ AND module <> externalModule
+ CALL { WITH module, externalModule
+ MERGE (externalModule)-[:RESOLVES_TO]->(module)
+ } IN TRANSACTIONS
+RETURN count(*) AS resolvedModules
\ No newline at end of file
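The matching above accepts either identical global names or names that only differ by a trailing index file. A hedged illustration with made-up paths (the exact globalFqn format is an assumption):

// Illustrative Cypher only: '/dist/utils' is treated as matching '/dist/utils/index.ts'
WITH '/dist/utils' AS moduleFqn, '/dist/utils/index.ts' AS externalFqn
RETURN toLower(moduleFqn) = split(toLower(externalFqn), '/index.')[0] AS namesMatch // true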
diff --git a/cypher/Typescript_Enrichment/Add_name_and_module_properties.cypher b/cypher/Typescript_Enrichment/Add_name_and_module_properties.cypher
new file mode 100644
index 000000000..84eacdd40
--- /dev/null
+++ b/cypher/Typescript_Enrichment/Add_name_and_module_properties.cypher
@@ -0,0 +1,13 @@
+// Add "name" and "module" properties to Typescript nodes that have a globalFqn property
+
+ MATCH (ts:TS)
+ WHERE ts.globalFqn IS NOT NULL
+ WITH ts
+ ,replace(split(ts.globalFqn, '".')[0],'"', '') AS moduleName
+ ,replace(split(ts.globalFqn, '/index')[0],'"', '') AS moduleNameWithoutIndex
+ ,split(ts.globalFqn, '".')[1] AS symbolName
+ WITH *
+ ,reverse(split(reverse(moduleNameWithoutIndex), '/')[0]) AS indexedName
+ SET ts.module = moduleName
+ ,ts.name = coalesce(symbolName, indexedName)
+RETURN count(*) AS updatedModules
\ No newline at end of file
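To show what this enrichment produces, here is a hedged example with a made-up globalFqn (the quoted-module-prefix format is an assumption derived from the splits in the query above):

// Illustrative Cypher only: '"src/app/date-utils".formatDate' -> module 'src/app/date-utils', name 'formatDate'
WITH '"src/app/date-utils".formatDate' AS globalFqn
RETURN replace(split(globalFqn, '".')[0], '"', '') AS moduleName
      ,split(globalFqn, '".')[1] AS symbolName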
diff --git a/graph-visualization/visualization-pagination.js b/graph-visualization/visualization-pagination.js
index 1fda9bf0d..2e3f3c099 100644
--- a/graph-visualization/visualization-pagination.js
+++ b/graph-visualization/visualization-pagination.js
@@ -51,10 +51,14 @@ function paginatedGraphVisualization({
neoViz.registerOnEvent(NeoVis.NeoVisEvents.CompletionEvent, (event) => {
if (event.recordCount == 0) {
- if (index=0) {
- log.error('No query results. Nothing to visualize. Check the query and if the nodes and properties have been written.')
+ if (index === 0) {
+ const message = 'No query results. Nothing to visualize. Check the query and if the nodes and properties have been written.';
+ console.warn(message)
+ indexedVisualizationContainer.classList.add(classOfFailedVisualization);
+ indexedVisualizationContainer.textContent = message;
+ } else {
+ indexedVisualizationContainer.remove(); // remove an empty canvas
}
- indexedVisualizationContainer.remove(); // remove an empty canvas
markVisualizationAsFinished(indexedVisualizationContainer, 'No query results (anymore)');
} else {
setTimeout(() => {
diff --git a/jupyter/ExternalDependencies.ipynb b/jupyter/ExternalDependencies.ipynb
index 6e03d980a..7ea81ed19 100644
--- a/jupyter/ExternalDependencies.ipynb
+++ b/jupyter/ExternalDependencies.ipynb
@@ -238,20 +238,23 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "\n",
- "axis = external_package_by_type_usage_significant.plot(\n",
- " kind='pie',\n",
- " title='Top external package usage [%] by type',\n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if external_package_by_type_usage_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ "\n",
+ " axis = external_package_by_type_usage_significant.plot(\n",
+ " kind='pie',\n",
+ " title='Top external package usage [%] by type',\n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -287,20 +290,23 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "\n",
- "axis = external_package_by_package_usage_significant.plot(\n",
- " kind='pie',\n",
- " title='Top external package usage [%] by package',\n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if external_package_by_package_usage_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ "\n",
+ " axis = external_package_by_package_usage_significant.plot(\n",
+ " kind='pie',\n",
+ " title='Top external package usage [%] by package',\n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -364,20 +370,23 @@
" threshold= 0.7\n",
");\n",
"\n",
- "plot.figure();\n",
- "\n",
- "axis = external_grouped_package_by_type_usage_significant.plot(\n",
- " kind='pie',\n",
- " title='Top external package (grouped by first 2 layers) usage [%] by type',\n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if external_grouped_package_by_type_usage_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ "\n",
+ " axis = external_grouped_package_by_type_usage_significant.plot(\n",
+ " kind='pie',\n",
+ " title='Top external package (grouped by first 2 layers) usage [%] by type',\n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -405,19 +414,22 @@
" threshold= 0.7\n",
");\n",
"\n",
- "plot.figure();\n",
- "axis = external_grouped_package_by_package_usage_significant.plot(\n",
- " kind='pie',\n",
- " title='Top external package (grouped by first 2 layers) usage [%] by package',\n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if external_grouped_package_by_package_usage_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " axis = external_grouped_package_by_package_usage_significant.plot(\n",
+ " kind='pie',\n",
+ " title='Top external package (grouped by first 2 layers) usage [%] by package',\n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -568,19 +580,22 @@
" threshold= 0.5\n",
");\n",
"\n",
- "plot.figure();\n",
- "axis = external_package_type_usage_spread_significant.plot(\n",
- " kind='pie',\n",
- " title='Top external package usage spread [%] by type',\n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if external_package_type_usage_spread_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " axis = external_package_type_usage_spread_significant.plot(\n",
+ " kind='pie',\n",
+ " title='Top external package usage spread [%] by type',\n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -607,19 +622,22 @@
" threshold= 0.5\n",
");\n",
"\n",
- "plot.figure();\n",
- "axis = external_package_usage_package_spread_significant.plot(\n",
- " kind='pie',\n",
- " title='Top external package usage spread [%] by package',\n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if external_package_usage_package_spread_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " axis = external_package_usage_package_spread_significant.plot(\n",
+ " kind='pie',\n",
+ " title='Top external package usage spread [%] by package',\n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -685,20 +703,23 @@
" threshold= 0.5\n",
");\n",
"\n",
- "plot.figure();\n",
- "\n",
- "axis = external_grouped_package_type_usage_spread_significant.plot(\n",
- " kind='pie',\n",
- " title='Top external package (grouped by first 2 layers) usage spread [%] by type',\n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if external_grouped_package_type_usage_spread_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ "\n",
+ " axis = external_grouped_package_type_usage_spread_significant.plot(\n",
+ " kind='pie',\n",
+ " title='Top external package (grouped by first 2 layers) usage spread [%] by type',\n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -726,20 +747,23 @@
" threshold= 0.5\n",
");\n",
"\n",
- "plot.figure();\n",
- "\n",
- "axis = external_grouped_package_package_usage_spread_significant.plot(\n",
- " kind='pie',\n",
- " title='Top external package (grouped by first 2 layers) usage spread [%] by package',\n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if external_grouped_package_package_usage_spread_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ "\n",
+ " axis = external_grouped_package_package_usage_spread_significant.plot(\n",
+ " kind='pie',\n",
+ " title='Top external package (grouped by first 2 layers) usage spread [%] by package',\n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " axis.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -971,18 +995,21 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "axes = external_packages_per_artifact_pivot.transpose().plot(\n",
- " kind='bar', \n",
- " grid=True,\n",
- " title='External package usage per artifact', \n",
- " xlabel='artifact',\n",
- " ylabel='number of packages',\n",
- " stacked=True,\n",
- " legend=True,\n",
- " cmap=main_color_map\n",
- ").legend(bbox_to_anchor=(1.0, 1.0))\n",
- "plot.show()"
+ "if external_packages_per_artifact_pivot.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " axes = external_packages_per_artifact_pivot.transpose().plot(\n",
+ " kind='bar', \n",
+ " grid=True,\n",
+ " title='External package usage per artifact', \n",
+ " xlabel='artifact',\n",
+ " ylabel='number of packages',\n",
+ " stacked=True,\n",
+ " legend=True,\n",
+ " cmap=main_color_map\n",
+ " ).legend(bbox_to_anchor=(1.0, 1.0))\n",
+ " plot.show()"
]
},
{
@@ -1004,18 +1031,21 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "axes = external_second_level_packages_per_artifact_pivot.transpose().plot(\n",
- " kind='bar', \n",
- " grid=True,\n",
- " title='External package (first 2 levels) usage per artifact', \n",
- " xlabel='artifact',\n",
- " ylabel='number of packages',\n",
- " stacked=True,\n",
- " legend=True,\n",
- " cmap=main_color_map\n",
- ").legend(bbox_to_anchor=(1.0, 1.0))\n",
- "plot.show()"
+ "if external_second_level_packages_per_artifact_pivot.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " axes = external_second_level_packages_per_artifact_pivot.transpose().plot(\n",
+ " kind='bar', \n",
+ " grid=True,\n",
+ " title='External package (first 2 levels) usage per artifact', \n",
+ " xlabel='artifact',\n",
+ " ylabel='number of packages',\n",
+ " stacked=True,\n",
+ " legend=True,\n",
+ " cmap=main_color_map\n",
+ " ).legend(bbox_to_anchor=(1.0, 1.0))\n",
+ " plot.show()"
]
},
{
@@ -1187,7 +1217,6 @@
"metadata": {},
"outputs": [],
"source": [
- "#external_package_usage_per_package_distribution = external_package_usage_per_artifact_distribution\n",
"external_package_usage_per_package_distribution = query_cypher_to_data_frame(\"../cypher/External_Dependencies/External_package_usage_per_internal_package_count.cypher\")\n",
"\n",
"# Only show external dependencies that are at least used in 2 internal packages. \n",
@@ -1364,34 +1393,36 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "axes = external_package_usage_aggregated.plot(\n",
- " kind='scatter',\n",
- " title='External package usage - max internal packages %', \n",
- " x='numberOfExternalPackages',\n",
- " y='maxNumberOfPackagesPercentage',\n",
- " s='artifactPackages',\n",
- " c='stdNumberOfPackagesPercentage',\n",
- " xlabel='external package count',\n",
- " ylabel='max percentage of internal packages',\n",
- " cmap=main_color_map,\n",
- ")\n",
- "\n",
- "# Annotate the largest artifact with the highest number of external packages and max number of packages in percentage\n",
- "annotation_index = index_of_sorted(highest=['numberOfExternalPackages','maxNumberOfPackagesPercentage'], data_frame=external_package_usage_aggregated)\n",
- "annotate_plot(external_package_usage_aggregated, annotation_index)\n",
+ "if external_package_usage_aggregated.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " axes = external_package_usage_aggregated.plot(\n",
+ " kind='scatter',\n",
+ " title='External package usage - max internal packages %', \n",
+ " x='numberOfExternalPackages',\n",
+ " y='maxNumberOfPackagesPercentage',\n",
+ " s='artifactPackages',\n",
+ " c='stdNumberOfPackagesPercentage',\n",
+ " xlabel='external package count',\n",
+ " ylabel='max percentage of internal packages',\n",
+ " cmap=main_color_map,\n",
+ " )\n",
"\n",
+ " # Annotate the largest artifact with the highest number of external packages and max number of packages in percentage\n",
+ " annotation_index = index_of_sorted(highest=['numberOfExternalPackages','maxNumberOfPackagesPercentage'], data_frame=external_package_usage_aggregated)\n",
+ " annotate_plot(external_package_usage_aggregated, annotation_index)\n",
"\n",
- "# Annotate the largest artifact with the lowest number of external packages and the highest max number of packages in percentage\n",
- "annotation_index = index_of_sorted(highest=['maxNumberOfPackagesPercentage'], data_frame=external_package_usage_aggregated)\n",
- "annotate_plot(external_package_usage_aggregated, annotation_index)\n",
"\n",
- "# Annotate the largest artifact with the lowest number of external packages and max number of packages in percentage\n",
- "annotation_index = index_of_sorted(highest=[], data_frame=external_package_usage_aggregated)\n",
- "annotate_plot(external_package_usage_aggregated, annotation_index)\n",
+ " # Annotate the largest artifact with the lowest number of external packages and the highest max number of packages in percentage\n",
+ " annotation_index = index_of_sorted(highest=['maxNumberOfPackagesPercentage'], data_frame=external_package_usage_aggregated)\n",
+ " annotate_plot(external_package_usage_aggregated, annotation_index)\n",
"\n",
+ " # Annotate the largest artifact with the lowest number of external packages and max number of packages in percentage\n",
+ " annotation_index = index_of_sorted(highest=[], data_frame=external_package_usage_aggregated)\n",
+ " annotate_plot(external_package_usage_aggregated, annotation_index)\n",
"\n",
- "plot.show()"
+ " plot.show()"
]
},
{
@@ -1413,19 +1444,22 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "axes = external_package_usage_aggregated.plot(\n",
- " kind='scatter',\n",
- " title='External package usage - median internal packages %', \n",
- " x='numberOfExternalPackages',\n",
- " y='medNumberOfPackagesPercentage',\n",
- " s='artifactPackages',\n",
- " c='stdNumberOfPackagesPercentage',\n",
- " xlabel='external package count',\n",
- " ylabel='median percentage of internal packages',\n",
- " cmap=main_color_map,\n",
- ")\n",
- "plot.show()"
+ "if external_package_usage_aggregated.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " axes = external_package_usage_aggregated.plot(\n",
+ " kind='scatter',\n",
+ " title='External package usage - median internal packages %', \n",
+ " x='numberOfExternalPackages',\n",
+ " y='medNumberOfPackagesPercentage',\n",
+ " s='artifactPackages',\n",
+ " c='stdNumberOfPackagesPercentage',\n",
+ " xlabel='external package count',\n",
+ " ylabel='median percentage of internal packages',\n",
+ " cmap=main_color_map,\n",
+ " )\n",
+ " plot.show()"
]
},
{
diff --git a/jupyter/InternalDependencies.ipynb b/jupyter/InternalDependencies.ipynb
index e0227513f..9c0def20b 100644
--- a/jupyter/InternalDependencies.ipynb
+++ b/jupyter/InternalDependencies.ipynb
@@ -63,7 +63,7 @@
"outputs": [],
"source": [
"def query_cypher_to_data_frame(filename : str, limit: int = 10_000):\n",
- " cypher_query_template = \"{query} LIMIT {row_limit}\"\n",
+ " cypher_query_template = \"{query}\\nLIMIT {row_limit}\"\n",
" cypher_query = get_cypher_query_from_file(filename)\n",
" cypher_query = cypher_query_template.format(query = cypher_query, row_limit = limit)\n",
" records, summary, keys = driver.execute_query(cypher_query)\n",
diff --git a/jupyter/MethodMetrics.ipynb b/jupyter/MethodMetrics.ipynb
index 4c6190699..4a215429b 100644
--- a/jupyter/MethodMetrics.ipynb
+++ b/jupyter/MethodMetrics.ipynb
@@ -196,25 +196,28 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "method_line_count_x_ticks=range(1,20)\n",
- "axes = effective_method_line_count_distribution_normalized.head(20).plot(\n",
- " kind='line',\n",
- " logx=True,\n",
- " grid=True,\n",
- " xlim=[2, 20],\n",
- " ylim=[0, 20],\n",
- " xticks=method_line_count_x_ticks,\n",
- " title='Effective Method Line Count Distribution', \n",
- " xlabel='effective line count',\n",
- " ylabel='percent of methods',\n",
- " cmap=main_color_map,\n",
- " figsize=(10, 6),\n",
- " lw=2,\n",
- ")\n",
- "axes.set_xticklabels(method_line_count_x_ticks)\n",
- "axes.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if effective_method_line_count_distribution_normalized.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " method_line_count_x_ticks=range(1,20)\n",
+ " axes = effective_method_line_count_distribution_normalized.head(20).plot(\n",
+ " kind='line',\n",
+ " logx=True,\n",
+ " grid=True,\n",
+ " xlim=[2, 20],\n",
+ " ylim=[0, 20],\n",
+ " xticks=method_line_count_x_ticks,\n",
+ " title='Effective Method Line Count Distribution', \n",
+ " xlabel='effective line count',\n",
+ " ylabel='percent of methods',\n",
+ " cmap=main_color_map,\n",
+ " figsize=(10, 6),\n",
+ " lw=2,\n",
+ " )\n",
+ " axes.set_xticklabels(method_line_count_x_ticks)\n",
+ " axes.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
@@ -371,27 +374,30 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "method_line_count_x_ticks=range(1,11)\n",
- "cyclomatic_complexity_y_ticks=[1, 2, 3, 4, 5, 7, 10, 20, 30, 40, 50, 100]\n",
- "axes = cyclomatic_method_complexity_distribution_normalized.plot(\n",
- " kind='line', \n",
- " logx=True,\n",
- " logy=True,\n",
- " grid=True,\n",
- " xlim=[1,11],\n",
- " ylim=[1,100],\n",
- " xticks=method_line_count_x_ticks,\n",
- " yticks=cyclomatic_complexity_y_ticks,\n",
- " title='Cyclomatic complexity distribution of methods', \n",
- " xlabel='cyclomatic complexity',\n",
- " ylabel='percentage of methods',\n",
- " cmap=main_color_map,\n",
- ")\n",
- "axes.set_xticklabels(method_line_count_x_ticks)\n",
- "axes.set_yticklabels(cyclomatic_complexity_y_ticks)\n",
- "axes.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if cyclomatic_method_complexity_distribution_normalized.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " method_line_count_x_ticks=range(1,11)\n",
+ " cyclomatic_complexity_y_ticks=[1, 2, 3, 4, 5, 7, 10, 20, 30, 40, 50, 100]\n",
+ " axes = cyclomatic_method_complexity_distribution_normalized.plot(\n",
+ " kind='line', \n",
+ " logx=True,\n",
+ " logy=True,\n",
+ " grid=True,\n",
+ " xlim=[1,11],\n",
+ " ylim=[1,100],\n",
+ " xticks=method_line_count_x_ticks,\n",
+ " yticks=cyclomatic_complexity_y_ticks,\n",
+ " title='Cyclomatic complexity distribution of methods', \n",
+ " xlabel='cyclomatic complexity',\n",
+ " ylabel='percentage of methods',\n",
+ " cmap=main_color_map,\n",
+ " )\n",
+ " axes.set_xticklabels(method_line_count_x_ticks)\n",
+ " axes.set_yticklabels(cyclomatic_complexity_y_ticks)\n",
+ " axes.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
},
{
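The cells changed above all apply the same safeguard: only plot when the queried DataFrame is not empty. As a minimal standalone sketch of that pattern (the DataFrame contents below are made up; the notebooks import matplotlib.pyplot as plot):

import pandas as pd
import matplotlib.pyplot as plot

# Made-up distribution; in the notebook this DataFrame comes from a Cypher query.
effective_method_line_count_distribution_normalized = pd.DataFrame(
    {"percent of methods": [42.0, 23.5, 11.2, 6.7]}, index=[1, 2, 3, 4]
)

if effective_method_line_count_distribution_normalized.empty:
    print("No data to plot")  # Keeps the cell from failing when the graph contains no matching nodes.
else:
    axes = effective_method_line_count_distribution_normalized.plot(kind='line', grid=True, figsize=(10, 6))
    axes.set_xlabel('effective line count')
    axes.set_ylabel('percent of methods')
    plot.show()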
diff --git a/jupyter/NodeEmbeddings.ipynb b/jupyter/NodeEmbeddings.ipynb
index 98eb64878..8a8194200 100644
--- a/jupyter/NodeEmbeddings.ipynb
+++ b/jupyter/NodeEmbeddings.ipynb
@@ -178,10 +178,12 @@
"metadata": {},
"outputs": [],
"source": [
- "query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_1_Delete_Projection.cypher\", package_embeddings_parameters)\n",
- "query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_2_Delete_Subgraph.cypher\", package_embeddings_parameters)\n",
- "query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_4_Create_Undirected_Projection.cypher\", package_embeddings_parameters)\n",
- "query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_5_Create_Subgraph.cypher\", package_embeddings_parameters)"
+ "isDataAvailable=not query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_0_Check_Projectable.cypher\", package_embeddings_parameters).empty\n",
+ "if isDataAvailable: \n",
+ " query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_1_Delete_Projection.cypher\", package_embeddings_parameters)\n",
+ " query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_2_Delete_Subgraph.cypher\", package_embeddings_parameters)\n",
+ " query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_4_Create_Undirected_Projection.cypher\", package_embeddings_parameters)\n",
+ " query_cypher_to_data_frame(\"../cypher/Dependencies_Projection/Dependencies_5_Create_Subgraph.cypher\", package_embeddings_parameters)"
]
},
{
@@ -203,9 +205,11 @@
"metadata": {},
"outputs": [],
"source": [
- "\n",
- "fast_random_projection = query_cypher_to_data_frame(\"../cypher/Node_Embeddings/Node_Embeddings_1d_Fast_Random_Projection_Stream.cypher\", package_embeddings_parameters)\n",
- "fast_random_projection.head() # Look at the first entries of the table \n"
+ "if isDataAvailable:\n",
+ " fast_random_projection = query_cypher_to_data_frame(\"../cypher/Node_Embeddings/Node_Embeddings_1d_Fast_Random_Projection_Stream.cypher\", package_embeddings_parameters)\n",
+ " fast_random_projection.head() # Look at the first entries of the table\n",
+ "else:\n",
+ " print(\"No data available\")\n"
]
},
{
@@ -230,16 +234,19 @@
"metadata": {},
"outputs": [],
"source": [
- "# Calling the fit_transform method just with a list doesn't seem to work (anymore?). \n",
- "# It leads to an error with the following message: 'list' object has no attribute 'shape'\n",
- "# This can be solved by converting the list to a numpy array using np.array(..).\n",
- "# See https://bobbyhadz.com/blog/python-attributeerror-list-object-has-no-attribute-shape\n",
- "embeddings_as_numpy_array = np.array(fast_random_projection.embedding.to_list())\n",
+ "if isDataAvailable:\n",
+ " # Calling the fit_transform method just with a list doesn't seem to work (anymore?). \n",
+ " # It leads to an error with the following message: 'list' object has no attribute 'shape'\n",
+ " # This can be solved by converting the list to a numpy array using np.array(..).\n",
+ " # See https://bobbyhadz.com/blog/python-attributeerror-list-object-has-no-attribute-shape\n",
+ " embeddings_as_numpy_array = np.array(fast_random_projection.embedding.to_list())\n",
"\n",
- "# Use TSNE to reduce the dimensionality of the previous calculated node embeddings to 2 dimensions for visualization\n",
- "t_distributed_stochastic_neighbor_embedding = TSNE(n_components=2, verbose=1, random_state=50)\n",
- "two_dimension_node_embeddings = t_distributed_stochastic_neighbor_embedding.fit_transform(embeddings_as_numpy_array)\n",
- "two_dimension_node_embeddings.shape"
+ " # Use TSNE to reduce the dimensionality of the previous calculated node embeddings to 2 dimensions for visualization\n",
+ " t_distributed_stochastic_neighbor_embedding = TSNE(n_components=2, verbose=1, random_state=50)\n",
+ " two_dimension_node_embeddings = t_distributed_stochastic_neighbor_embedding.fit_transform(embeddings_as_numpy_array)\n",
+ " two_dimension_node_embeddings.shape\n",
+ "else:\n",
+ " print(\"No data available\")"
]
},
{
@@ -249,17 +256,20 @@
"metadata": {},
"outputs": [],
"source": [
- "# Create a new DataFrame with the results of the 2 dimensional node embeddings\n",
- "# and the code unit and artifact name of the query above as preparation for the plot\n",
- "node_embeddings_for_visualization = pd.DataFrame(data = {\n",
- " \"codeUnit\": fast_random_projection.codeUnitName,\n",
- " \"artifact\": fast_random_projection.artifactName,\n",
- " \"communityId\": fast_random_projection.communityId,\n",
- " \"centrality\": fast_random_projection.centrality,\n",
- " \"x\": [value[0] for value in two_dimension_node_embeddings],\n",
- " \"y\": [value[1] for value in two_dimension_node_embeddings]\n",
- "})\n",
- "node_embeddings_for_visualization.head()"
+ "if isDataAvailable:\n",
+ " # Create a new DataFrame with the results of the 2 dimensional node embeddings\n",
+ " # and the code unit and artifact name of the query above as preparation for the plot\n",
+ " node_embeddings_for_visualization = pd.DataFrame(data = {\n",
+ " \"codeUnit\": fast_random_projection.codeUnitName,\n",
+ " \"artifact\": fast_random_projection.artifactName,\n",
+ " \"communityId\": fast_random_projection.communityId,\n",
+ " \"centrality\": fast_random_projection.centrality,\n",
+ " \"x\": [value[0] for value in two_dimension_node_embeddings],\n",
+ " \"y\": [value[1] for value in two_dimension_node_embeddings]\n",
+ " })\n",
+ " node_embeddings_for_visualization.head()\n",
+ "else:\n",
+ " print(\"No data available\")"
]
},
{
@@ -269,15 +279,18 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.scatter(\n",
- " x=node_embeddings_for_visualization.x,\n",
- " y=node_embeddings_for_visualization.y,\n",
- " s=node_embeddings_for_visualization.centrality * 200,\n",
- " c=node_embeddings_for_visualization.communityId,\n",
- " cmap=main_color_map,\n",
- ")\n",
- "plot.title(\"Package nodes positioned by their dependency relationships using t-SNE\")\n",
- "plot.show()"
+ "if isDataAvailable:\n",
+ " plot.scatter(\n",
+ " x=node_embeddings_for_visualization.x,\n",
+ " y=node_embeddings_for_visualization.y,\n",
+ " s=node_embeddings_for_visualization.centrality * 200,\n",
+ " c=node_embeddings_for_visualization.communityId,\n",
+ " cmap=main_color_map,\n",
+ " )\n",
+ " plot.title(\"Package nodes positioned by their dependency relationships using t-SNE\")\n",
+ " plot.show()\n",
+ "else:\n",
+ " print(\"No data to plot\")"
]
}
],
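The conversion step above exists because TSNE.fit_transform expects an array-like with a shape attribute and can fail on a plain list of lists. A small self-contained sketch of that reduction step, using randomly generated embeddings instead of the Fast Random Projection results:

import numpy as np
from sklearn.manifold import TSNE

# Randomly generated stand-in for the node embeddings (40 nodes, 64 dimensions each),
# in the notebook they are returned by the Cypher stream as a list of lists.
random_number_generator = np.random.default_rng(42)
embedding_lists = random_number_generator.normal(size=(40, 64)).tolist()

# Convert the list of lists to a NumPy array to avoid "'list' object has no attribute 'shape'".
embeddings_as_numpy_array = np.array(embedding_lists)

# Reduce the embeddings to 2 dimensions for visualization.
two_dimension_node_embeddings = TSNE(n_components=2, random_state=50).fit_transform(embeddings_as_numpy_array)
print(two_dimension_node_embeddings.shape)  # (40, 2)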
diff --git a/jupyter/ObjectOrientedDesignMetrics.ipynb b/jupyter/ObjectOrientedDesignMetrics.ipynb
index 01d1bfd32..33e03791e 100644
--- a/jupyter/ObjectOrientedDesignMetrics.ipynb
+++ b/jupyter/ObjectOrientedDesignMetrics.ipynb
@@ -53,8 +53,8 @@
"metadata": {},
"outputs": [],
"source": [
- "def get_cypher_query_from_file(cypherFileName):\n",
- " with open(cypherFileName) as file:\n",
+ "def get_cypher_query_from_file(cypher_file_name: str):\n",
+ " with open(cypher_file_name) as file:\n",
" return ' '.join(file.readlines())"
]
},
@@ -65,11 +65,35 @@
"metadata": {},
"outputs": [],
"source": [
- "def query_cypher_to_data_frame(filename):\n",
- " records, summary, keys = driver.execute_query(get_cypher_query_from_file(filename))\n",
+ "def query_cypher_to_data_frame(filename : str, limit: int = 10_000):\n",
+ " cypher_query_template = \"{query}\\nLIMIT {row_limit}\"\n",
+ " cypher_query = get_cypher_query_from_file(filename)\n",
+ " cypher_query = cypher_query_template.format(query = cypher_query, row_limit = limit)\n",
+ " records, summary, keys = driver.execute_query(cypher_query)\n",
" return pd.DataFrame([r.values() for r in records], columns=keys)"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "013395f1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def query_first_non_empty_cypher_to_data_frame(*filenames : str, limit: int = 10_000):\n",
+ " \"\"\"\n",
+ " Executes the Cypher queries of the given files and returns the first result that is not empty.\n",
+ " If all given file names result in empty results, the last (empty) result will be returned.\n",
+ " By additionally specifying \"limit=\" the \"LIMIT\" keyword will appended to query so that only the first results get returned.\n",
+ " \"\"\" \n",
+ " result=pd.DataFrame()\n",
+ " for filename in filenames:\n",
+ " result=query_cypher_to_data_frame(filename, limit)\n",
+ " if not result.empty:\n",
+ " return result\n",
+ " return result"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -113,10 +137,16 @@
" \n",
"If these packages get changed, the incoming dependencies might be affected by the change. The more incoming dependencies, the harder it gets to change the code without the need to adapt the dependent code (βrigid codeβ). Even worse, it might affect the behavior of the dependent code in an unwanted way (βfragile codeβ).\n",
"\n",
- "Since Java Packages are organized hierarchically, incoming dependencies can be count for every package in isolation or by including all of its sub-packages. The latter one is done without top level packages like for example \"org\" or \"org.company\" by assuring that only packages are considered that have other packages or types in the same hierarchy level (\"siblings\").\n",
- "\n",
+ "Since Java Packages are organized hierarchically, incoming dependencies can be count for every package in isolation or by including all of its sub-packages. The latter one is done without top level packages like for example \"org\" or \"org.company\" by assuring that only packages are considered that have other packages or types in the same hierarchy level (\"siblings\")."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b637f8c8",
+ "metadata": {},
+ "source": [
"#### Table 1a\n",
- "- Show the top 20 packages with the most incoming dependencies\n",
+ "- Show the top 20 Java Packages with the most incoming dependencies\n",
"- Set the \"incomingDependencies\" properties on Package nodes."
]
},
@@ -129,7 +159,8 @@
},
"outputs": [],
"source": [
- "query_cypher_to_data_frame(\"../cypher/Metrics/Set_Incoming_Package_Dependencies.cypher\").head(20)"
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Incoming_Java_Package_Dependencies.cypher\",\n",
+ " \"../cypher/Metrics/Set_Incoming_Java_Package_Dependencies.cypher\", limit=20)"
]
},
{
@@ -138,7 +169,7 @@
"metadata": {},
"source": [
"#### Table 1b\n",
- "- Show the top 20 packages including their sub-packages with the most incoming dependencies\n",
+ "- Show the top 20 Java Packages including their sub-packages with the most incoming dependencies\n",
"- Set the property \"incomingDependenciesIncludingSubpackages\" on Package nodes."
]
},
@@ -149,7 +180,29 @@
"metadata": {},
"outputs": [],
"source": [
- "query_cypher_to_data_frame(\"../cypher/Metrics/Set_Incoming_Package_Dependencies_Including_Subpackages.cypher\").head(20)"
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher\",\n",
+ " \"../cypher/Metrics/Set_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher\", limit=20)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b27f225d",
+ "metadata": {},
+ "source": [
+ "#### Table 1c\n",
+ "- Show the top 20 Typescript modules with the most incoming dependencies\n",
+ "- Set the property \"incomingDependencies\" on Module nodes if not already done."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "98974b46",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Incoming_Typescript_Module_Dependencies.cypher\",\n",
+ " \"../cypher/Metrics/Set_Incoming_Typescript_Module_Dependencies.cypher\", limit=20)"
]
},
{
@@ -165,11 +218,17 @@
"\n",
"Code from other packages and libraries youβre depending on (outgoing) might change over time. The more outgoing changes, the more likely and frequently code changes are needed. This involves time and effort which can be reduced by automation of tests and version updates. Automated tests are crucial to reveal updates, that change the behavior of the code unexpectedly (βfragile codeβ). As soon as more effort is required, keeping up becomes difficult (βrigid codeβ). Not being able to use a newer version might not only restrict features, it can get problematic if there are security issues. This might force you to take βfast but uglyβ solutions into account which further increases technical dept.\n",
"\n",
- "Since Java Packages are organized hierarchically, outgoing dependencies can be count for every package in isolation or by including all of its sub-packages. The latter one is done without top level packages like for example \"org\" or \"org.company\" by assuring that only packages are considered that have other packages or types in the same hierarchy level (\"siblings\").\n",
- "\n",
+ "Since Java Packages are organized hierarchically, outgoing dependencies can be count for every package in isolation or by including all of its sub-packages. The latter one is done without top level packages like for example \"org\" or \"org.company\" by assuring that only packages are considered that have other packages or types in the same hierarchy level (\"siblings\")."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8cebfec5",
+ "metadata": {},
+ "source": [
"#### Table 2a\n",
"\n",
- "- Show the top 20 packages with the most outgoing dependencies\n",
+ "- Show the top 20 Java Packages with the most outgoing dependencies\n",
"- Set the \"outgoingDependencies\" properties on Package nodes."
]
},
@@ -180,7 +239,8 @@
"metadata": {},
"outputs": [],
"source": [
- "query_cypher_to_data_frame(\"../cypher/Metrics/Set_Outgoing_Package_Dependencies.cypher\").head(20)"
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Outgoing_Java_Package_Dependencies.cypher\",\n",
+ " \"../cypher/Metrics/Set_Outgoing_Java_Package_Dependencies.cypher\", limit=20)"
]
},
{
@@ -190,7 +250,7 @@
"source": [
"#### Table 2b\n",
"\n",
- "- Show the top 20 packages including their sub-packages with the most outgoing dependencies\n",
+ "- Show the top 20 Java Packages including their sub-packages with the most outgoing dependencies\n",
"- Set the property \"outgoingDependenciesIncludingSubpackages\" on Package nodes."
]
},
@@ -201,7 +261,30 @@
"metadata": {},
"outputs": [],
"source": [
- "query_cypher_to_data_frame(\"../cypher/Metrics/Set_Outgoing_Package_Dependencies_Including_Subpackages.cypher\").head(20)"
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher\",\n",
+ " \"../cypher/Metrics/Set_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher\", limit=20)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "747cb31d",
+ "metadata": {},
+ "source": [
+ "#### Table 2c\n",
+ "\n",
+ "- Show the top 20 Typescript modules with the most outgoing dependencies\n",
+ "- Set the \"outgoingDependencies\" properties on Module nodes if not already done"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1be12a2e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Outgoing_Typescript_Module_Dependencies.cypher\",\n",
+ " \"../cypher/Metrics/Set_Outgoing_Typescript_Module_Dependencies.cypher\", limit=20)"
]
},
{
@@ -220,11 +303,17 @@
"\n",
"Conversely, high values approaching one indicate high *Instability*. With some outgoing dependencies but no incoming ones the *Instability* is denoted as maximally unstable. Such code units are easier to change without affecting other modules, making them more flexible and less prone to cascading changes throughout the system. If they are changed more often because of that, they are considered unstable.\n",
"\n",
- "Since Java Packages are organized hierarchically, *Instability* can be calculated for every package in isolation or by including all of its sub-packages. \n",
- "\n",
+ "Since Java Packages are organized hierarchically, *Instability* can be calculated for every package in isolation or by including all of its sub-packages. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fafd1840",
+ "metadata": {},
+ "source": [
"#### Table 3a\n",
"\n",
- "- Show the top 20 packages with the lowest *Instability*\n",
+ "- Show the top 20 Java Packages with the lowest *Instability*\n",
"- Set the property \"instability\" on Package nodes. "
]
},
@@ -235,7 +324,8 @@
"metadata": {},
"outputs": [],
"source": [
- "query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_and_set_Instability_outgoing_incoming_Dependencies.cypher\").head(20)"
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Instability_for_Java.cypher\",\n",
+ " \"../cypher/Metrics/Calculate_and_set_Instability_for_Java.cypher\", limit=20)"
]
},
{
@@ -245,7 +335,7 @@
"source": [
"#### Table 3b\n",
"\n",
- "- Show the top 20 packages including their sub-packages with the lowest *Instability*\n",
+ "- Show the top 20 Java Packages including their sub-packages with the lowest *Instability*\n",
"- Set the property \"instabilityIncludingSubpackages\" on Package nodes. "
]
},
@@ -256,7 +346,30 @@
"metadata": {},
"outputs": [],
"source": [
- "query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_and_set_Instability_Including_Subpackages.cypher\").head(20)"
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Instability_for_Java_Including_Subpackages.cypher\",\n",
+ " \"../cypher/Metrics/Calculate_and_set_Instability_for_Java_Including_Subpackages.cypher\", limit=20)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "17c081d0",
+ "metadata": {},
+ "source": [
+ "#### Table 3c\n",
+ "\n",
+ "- Show the top 20 Typescript modules with the lowest *Instability*\n",
+ "- Set the property \"instability\" on Module nodes if not already done"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "77862c9e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Instability_for_Typescript.cypher\",\n",
+ " \"../cypher/Metrics/Calculate_and_set_Instability_for_Typescript.cypher\", limit=20)"
]
},
{
@@ -273,8 +386,14 @@
"\n",
"Zero *Abstractness* means that there are no abstract types or interfaces in the package. On the other hand, a value of one means that there are only abstract types.\n",
"\n",
- "Since Java Packages are organized hierarchically, *Abstractness* can be calculated for every package in isolation or by including all of its sub-packages. \n",
- "\n",
+ "Since Java Packages are organized hierarchically, *Abstractness* can be calculated for every package in isolation or by including all of its sub-packages. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6932b105",
+ "metadata": {},
+ "source": [
"#### Table 4a\n",
"\n",
"- Show the top 30 packages with the lowest *Abstractness*\n",
@@ -288,7 +407,8 @@
"metadata": {},
"outputs": [],
"source": [
- "abstractness=query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_and_set_Abstractness_including_Counts.cypher\")\n",
+ "abstractness = query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Abstractness_for_Java.cypher\",\n",
+ " \"../cypher/Metrics/Calculate_and_set_Abstractness_for_Java.cypher\")\n",
"abstractness.head(30)"
]
},
@@ -330,7 +450,8 @@
"metadata": {},
"outputs": [],
"source": [
- "abstractnessIncludingSubpackages=query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_and_set_Abstractness_including_Subpackages.cypher\")\n",
+ "abstractnessIncludingSubpackages = query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Abstractness_for_Java_including_Subpackages.cypher\",\n",
+ " \"../cypher/Metrics/Calculate_and_set_Abstractness_for_Java_including_Subpackages.cypher\")\n",
"abstractnessIncludingSubpackages.head(30)"
]
},
@@ -354,6 +475,28 @@
"abstractnessIncludingSubpackages.sort_values(by=['abstractness', 'maxSubpackageDepth', 'numberTypes'], ascending=[False, False, False]).head(30)"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "6f95770e",
+ "metadata": {},
+ "source": [
+ "#### Table 4e\n",
+ "\n",
+ "- Show the top 30 Typescript modules with the lowest *Abstractness*\n",
+ "- Set the property \"abstractness\" on Module nodes if not already done."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "65c36080",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "query_first_non_empty_cypher_to_data_frame(\"../cypher/Metrics/Get_Abstractness_for_Typescript.cypher\",\n",
+ " \"../cypher/Metrics/Calculate_and_set_Abstractness_for_Typescript.cypher\", limit=20)"
+ ]
+ },
{
"attachments": {},
"cell_type": "markdown",
@@ -378,7 +521,7 @@
"metadata": {},
"outputs": [],
"source": [
- "instabilityPerAbstractness = query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_distance_between_abstractness_and_instability.cypher\")\n",
+ "instabilityPerAbstractness = query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Java.cypher\")\n",
"instabilityPerAbstractness.head(30)"
]
},
@@ -399,10 +542,31 @@
"metadata": {},
"outputs": [],
"source": [
- "instabilityPerAbstractnessIncludingSubpackages = query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_distance_between_abstractness_and_instability_including_subpackages.cypher\")\n",
+ "instabilityPerAbstractnessIncludingSubpackages = query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Java_including_subpackages.cypher\")\n",
"instabilityPerAbstractnessIncludingSubpackages.head(30)"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "c4fdbb1d",
+ "metadata": {},
+ "source": [
+ "#### Table 5c\n",
+ "\n",
+ "- Show the top 30 Typescript modules with the highest distance from the \"main sequence\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0a1d2d24",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "instabilityPerAbstractnessTypescript = query_cypher_to_data_frame(\"../cypher/Metrics/Calculate_distance_between_abstractness_and_instability_for_Typescript.cypher\")\n",
+ "instabilityPerAbstractnessTypescript.head(30)"
+ ]
+ },
{
"attachments": {},
"cell_type": "markdown",
@@ -451,7 +615,7 @@
" x_position = data_frame.abstractness[index].item()\n",
" y_position = data_frame.instability[index].item()\n",
" artifact_name = data_frame.artifactName[index].item()\n",
- " package_name = data_frame.packageName[index].item()\n",
+ " package_name = data_frame.name[index].item()\n",
"\n",
" label_box=dict(boxstyle=\"round4,pad=0.5\", fc=\"w\", alpha=0.8)\n",
" plot.annotate(artifact_name + '\\n' + package_name\n",
@@ -466,11 +630,11 @@
"\n",
"def index_of_sorted(data_frame: pd.DataFrame, highest: list[str] = []):\n",
" \"\"\"\n",
- " Sorts the \"data_frame\" by columns 'abstractness','instability','typesInPackage', 'artifactName'\n",
+ " Sorts the \"data_frame\" by columns 'abstractness','instability','elementsCount', 'artifactName'\n",
" and returns the index of the first row.\n",
" Columns that are contained in the list of strings parameter \"highest\" will be sorted descending.\n",
" \"\"\"\n",
- " by = ['abstractness','instability','typesInPackage','artifactName']\n",
+ " by = ['abstractness','instability','elementsCount','artifactName']\n",
" ascending = [('abstractness' not in highest), ('instability' not in highest), False, True]\n",
" return data_frame.sort_values(by=by, ascending=ascending).head(1).index\n",
"\n",
@@ -504,7 +668,7 @@
" \"\"\"\n",
" \n",
" colormap=LinearSegmentedColormap.from_list('rg',[\"green\", \"gold\", \"orangered\", \"red\"], N=256) \n",
- " marker_scales_bounded=data_frame.typesInPackage.clip(lower=2, upper=300) * 0.7\n",
+ " marker_scales_bounded=data_frame.elementsCount.clip(lower=2, upper=300) * 0.7\n",
"\n",
" plot.scatter(\n",
" data_frame.abstractness, # x axis shows abstractness\n",
@@ -518,7 +682,7 @@
" plot.plot([0,1], [1,0], c='lightgreen', linestyle='dashed') \n",
"\n",
" # Annotate largest package\n",
- " annotate_plot(data_frame, index_of_highest_property(data_frame, highest='typesInPackage'))\n",
+ " annotate_plot(data_frame, index_of_highest_property(data_frame, highest='elementsCount'))\n",
" # Annotate largest package with the highest abstractness and instability\n",
" annotate_plot(data_frame, index_of_sorted(data_frame, highest=['abstractness','instability']))\n",
" # Annotate largest package with the lowest abstractness and highest instability\n",
@@ -552,7 +716,8 @@
"metadata": {},
"outputs": [],
"source": [
- "plot_instability_per_abstractness_with_main_sequence(instabilityPerAbstractness, 'Packages without their sub-packages')"
+ "if not instabilityPerAbstractness.empty:\n",
+ " plot_instability_per_abstractness_with_main_sequence(instabilityPerAbstractness, 'Packages without their sub-packages')"
]
},
{
@@ -570,7 +735,27 @@
"metadata": {},
"outputs": [],
"source": [
- "plot_instability_per_abstractness_with_main_sequence(instabilityPerAbstractnessIncludingSubpackages, 'Packages including their sub-packages')"
+ "if not instabilityPerAbstractnessIncludingSubpackages.empty:\n",
+ " plot_instability_per_abstractness_with_main_sequence(instabilityPerAbstractnessIncludingSubpackages, 'Packages including their sub-packages')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c7499522",
+ "metadata": {},
+ "source": [
+ "#### Figure 1c - Typescript Modules"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "02763566",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if not instabilityPerAbstractnessTypescript.empty:\n",
+ " plot_instability_per_abstractness_with_main_sequence(instabilityPerAbstractnessTypescript, 'Typescript modules')"
]
}
],
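For reference, the "distance" calculated by the Cypher queries referenced above is Robert C. Martin's distance from the main sequence, D = |Abstractness + Instability - 1|. A short sketch of the same calculation in pandas, assuming columns named abstractness and instability as in the plot functions (the values and the column name "distance" are illustrative only):

import pandas as pd

# Illustrative values; in the notebook the real values come from the distance Cypher queries.
code_units = pd.DataFrame({
    "name": ["module-a", "module-b", "module-c"],
    "abstractness": [0.0, 0.5, 1.0],
    "instability": [0.1, 0.5, 0.9],
})

# Distance from the "main sequence": 0 is balanced; values near 1 indicate code that is either
# concrete and stable ("zone of pain") or abstract and unstable ("zone of uselessness").
code_units["distance"] = (code_units["abstractness"] + code_units["instability"] - 1).abs()
print(code_units.sort_values(by="distance", ascending=False).head(30))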
diff --git a/jupyter/Overview.ipynb b/jupyter/Overview.ipynb
index 2283684bd..ebdb30148 100644
--- a/jupyter/Overview.ipynb
+++ b/jupyter/Overview.ipynb
@@ -227,17 +227,20 @@
"metadata": {},
"outputs": [],
"source": [
- "plot.figure();\n",
- "types_per_artifact_grouped.head(30).plot(\n",
- " kind='bar', \n",
- " title='Top 30 types per artifact',\n",
- " xlabel='Artifact',\n",
- " ylabel='Types',\n",
- " stacked=True, \n",
- " cmap=main_color_map,\n",
- " figsize=(8, 5)\n",
- ")\n",
- "plot.show()"
+ "if types_per_artifact_grouped.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " types_per_artifact_grouped.head(30).plot(\n",
+ " kind='bar', \n",
+ " title='Top 30 types per artifact',\n",
+ " xlabel='Artifact',\n",
+ " ylabel='Types',\n",
+ " stacked=True, \n",
+ " cmap=main_color_map,\n",
+ " figsize=(8, 5)\n",
+ " )\n",
+ " plot.show()"
]
},
{
@@ -277,14 +280,17 @@
"metadata": {},
"outputs": [],
"source": [
- "types_per_artifact_sorted_by_classes=types_per_artifact_grouped_normalized.sort_values(by='Class', ascending=False)\n",
- "\n",
- "plot.figure();\n",
- "types_per_artifact_sorted_by_classes.head(30).plot(kind='bar', stacked=True, cmap=main_color_map, figsize=(8, 5))\n",
- "plot.xlabel('Artifact')\n",
- "plot.ylabel('Types %')\n",
- "plot.title('Class types [%] per artifact')\n",
- "plot.show()"
+ "if types_per_artifact_grouped_normalized.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " types_per_artifact_sorted_by_classes=types_per_artifact_grouped_normalized.sort_values(by='Class', ascending=False)\n",
+ " \n",
+ " plot.figure();\n",
+ " types_per_artifact_sorted_by_classes.head(30).plot(kind='bar', stacked=True, cmap=main_color_map, figsize=(8, 5))\n",
+ " plot.xlabel('Artifact')\n",
+ " plot.ylabel('Types %')\n",
+ " plot.title('Class types [%] per artifact')\n",
+ " plot.show()"
]
},
{
@@ -302,14 +308,17 @@
"metadata": {},
"outputs": [],
"source": [
- "types_per_artifact_sorted_by_interfaces=types_per_artifact_grouped_normalized.sort_values(by='Interface', ascending=False)\n",
- "\n",
- "plot.figure();\n",
- "types_per_artifact_sorted_by_interfaces.head(30).plot(kind='bar', stacked=True, cmap=main_color_map, figsize=(8, 5))\n",
- "plot.xlabel('Artifact')\n",
- "plot.ylabel('Types %')\n",
- "plot.title('Interface types [%] per artifact')\n",
- "plot.show()"
+ "if types_per_artifact_grouped_normalized.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " types_per_artifact_sorted_by_interfaces=types_per_artifact_grouped_normalized.sort_values(by='Interface', ascending=False)\n",
+ " \n",
+ " plot.figure();\n",
+ " types_per_artifact_sorted_by_interfaces.head(30).plot(kind='bar', stacked=True, cmap=main_color_map, figsize=(8, 5))\n",
+ " plot.xlabel('Artifact')\n",
+ " plot.ylabel('Types %')\n",
+ " plot.title('Interface types [%] per artifact')\n",
+ " plot.show()"
]
},
{
@@ -327,14 +336,16 @@
"metadata": {},
"outputs": [],
"source": [
- "types_per_artifact_sorted_by_enums=types_per_artifact_grouped_normalized.sort_values(by='Enum', ascending=False)\n",
- "\n",
- "plot.figure();\n",
- "types_per_artifact_sorted_by_enums.head(30).plot(kind='bar', stacked=True, cmap=main_color_map, figsize=(8, 5))\n",
- "plot.xlabel('Artifact')\n",
- "plot.ylabel('Types %')\n",
- "plot.title('Enum types [%] per artifact')\n",
- "plot.show()"
+ "if types_per_artifact_grouped_normalized.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " types_per_artifact_sorted_by_enums=types_per_artifact_grouped_normalized.sort_values(by='Enum', ascending=False)\n",
+ " plot.figure();\n",
+ " types_per_artifact_sorted_by_enums.head(30).plot(kind='bar', stacked=True, cmap=main_color_map, figsize=(8, 5))\n",
+ " plot.xlabel('Artifact')\n",
+ " plot.ylabel('Types %')\n",
+ " plot.title('Enum types [%] per artifact')\n",
+ " plot.show()"
]
},
{
@@ -352,14 +363,16 @@
"metadata": {},
"outputs": [],
"source": [
- "types_per_artifact_sorted_by_annotations=types_per_artifact_grouped_normalized.sort_values(by='Annotation', ascending=False)\n",
- "\n",
- "plot.figure();\n",
- "types_per_artifact_sorted_by_annotations.head(30).plot(kind='bar', stacked=True, cmap=main_color_map, figsize=(8, 5))\n",
- "plot.xlabel('Artifact')\n",
- "plot.ylabel('Types %')\n",
- "plot.title('Annotation types [%] per artifact')\n",
- "plot.show()"
+ "if types_per_artifact_grouped_normalized.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " types_per_artifact_sorted_by_annotations=types_per_artifact_grouped_normalized.sort_values(by='Annotation', ascending=False)\n",
+ " plot.figure();\n",
+ " types_per_artifact_sorted_by_annotations.head(30).plot(kind='bar', stacked=True, cmap=main_color_map, figsize=(8, 5))\n",
+ " plot.xlabel('Artifact')\n",
+ " plot.ylabel('Types %')\n",
+ " plot.title('Annotation types [%] per artifact')\n",
+ " plot.show()"
]
},
{
@@ -453,20 +466,23 @@
" threshold= 0.7\n",
");\n",
"\n",
- "plot.figure();\n",
- "types_per_artifact_sorted_significant.plot(\n",
- " y='numberOfPackages', \n",
- " kind='pie', \n",
- " title='Number of packages per artifact', \n",
- " legend=True,\n",
- " labeldistance=None,\n",
- " autopct='%1.2f%%',\n",
- " textprops={'fontsize': 5},\n",
- " pctdistance=1.2,\n",
- " cmap=main_color_map\n",
- ")\n",
- "plot.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
- "plot.show()"
+ "if types_per_artifact_sorted_significant.empty:\n",
+ " print(\"No data to plot\")\n",
+ "else:\n",
+ " plot.figure();\n",
+ " types_per_artifact_sorted_significant.plot(\n",
+ " y='numberOfPackages', \n",
+ " kind='pie', \n",
+ " title='Number of packages per artifact', \n",
+ " legend=True,\n",
+ " labeldistance=None,\n",
+ " autopct='%1.2f%%',\n",
+ " textprops={'fontsize': 5},\n",
+ " pctdistance=1.2,\n",
+ " cmap=main_color_map\n",
+ " )\n",
+ " plot.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n",
+ " plot.show()"
]
}
],
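The sorted percentage charts above rely on a normalized table with one column per type kind. That normalization step is not part of this diff; one plausible way to derive it from the grouped counts is sketched below (the table contents and the normalization call are assumptions, not taken from the notebook):

import pandas as pd

# Assumed grouped counts of type kinds per artifact; in the notebook this comes from a Cypher query.
types_per_artifact_grouped = pd.DataFrame(
    {"Class": [120, 80], "Interface": [30, 10], "Enum": [5, 2], "Annotation": [1, 0]},
    index=["artifact-a", "artifact-b"],
)

# Express every row as percentages so that artifacts of different sizes become comparable.
types_per_artifact_grouped_normalized = (
    types_per_artifact_grouped.div(types_per_artifact_grouped.sum(axis=1), axis=0) * 100
)

# Sort by a single type column, as the cells above do for Class, Interface, Enum and Annotation.
print(types_per_artifact_grouped_normalized.sort_values(by="Class", ascending=False).head(30))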
diff --git a/jupyter/Wordcloud.ipynb b/jupyter/Wordcloud.ipynb
index 31dced7f2..9d1ca80ef 100644
--- a/jupyter/Wordcloud.ipynb
+++ b/jupyter/Wordcloud.ipynb
@@ -125,8 +125,8 @@
"outputs": [],
"source": [
"# Query data from graph database\n",
- "words = query_cypher_to_data_frame(\"../cypher/Overview/Words_for_Wordcloud.cypher\")\n",
- "words.head(20)"
+ "words = query_cypher_to_data_frame(\"../cypher/Overview/Words_for_universal_Wordcloud.cypher\")\n",
+ "words.head(30)"
]
},
{
@@ -138,18 +138,31 @@
"source": [
"# Join all words into one text separated by spaces\n",
"text = \" \".join(i for i in words.word)\n",
- "print(\"There are {} words in the dataset.\".format(len(words.word)))\n",
+ "number_of_words=len(words.word)\n",
+ "print(\"There are {} words in the dataset.\".format(number_of_words))\n",
"\n",
"# Define stop words\n",
"stopwords = set(STOPWORDS)\n",
- "stopwords.update(['builder', 'exception', 'abstract', 'helper', 'util', 'callback', 'factory', 'handler', 'repository', 'result'])\n",
- "wordcloud = WordCloud(stopwords=stopwords, background_color='white', colormap='viridis').generate(text)\n",
+ "stopwords.update(['builder', 'exception', 'abstract', 'helper', 'util', 'callback', 'factory', 'result',\n",
+ " 'handler', 'type', 'module', 'name', 'parameter', 'lambda', 'access', 'create', 'message', \n",
+ " 'ts', 'js', 'tsx', 'jsx', 'css', 'htm', 'html', 'props', 'use', 'id', 'ref', 'hook', 'event', \n",
+ " 'span', 'data', 'context', 'form', 'get', 'set', 'object', 'null', 'new'])\n",
"\n",
- "# Plot the word cloud\n",
- "plot.figure(figsize=(15,10))\n",
- "plot.imshow(wordcloud, interpolation='bilinear')\n",
- "plot.axis(\"off\")\n",
- "plot.show()"
+ "if number_of_words > 0:\n",
+ " wordcloud = WordCloud(\n",
+ " width=800, \n",
+ " height=400,\n",
+ " max_words=400, \n",
+ " stopwords=stopwords,\n",
+ " background_color='white', \n",
+ " colormap='viridis'\n",
+ " ).generate(text)\n",
+ "\n",
+ " # Plot the word cloud\n",
+ " plot.figure(figsize=(15,10))\n",
+ " plot.imshow(wordcloud, interpolation='bilinear')\n",
+ " plot.axis(\"off\")\n",
+ " plot.show()"
]
}
],
diff --git a/renovate.json b/renovate.json
index c0e9ae3b5..037858350 100644
--- a/renovate.json
+++ b/renovate.json
@@ -20,6 +20,18 @@
"datasourceTemplate": "github-releases",
"extractVersionTemplate": "^axon-?(?.*?)$"
},
+ {
+ "fileMatch": [
+ "^(workflow-templates|\\.github\/workflows)\\/[^/]+\\.ya?ml$",
+ "(^|\\/)action\\.ya?ml$]"
+ ],
+ "matchStrings": [
+ "REACT_ROUTER_VERSION:\\s+?(?.*?)\\s+"
+ ],
+ "depNameTemplate": "remix-run/react-router",
+ "datasourceTemplate": "github-releases",
+ "extractVersionTemplate": "^react-router@?(?.*?)$"
+ },
{
"fileMatch": [
"README.md"
diff --git a/scripts/cleanupAfterReportGeneration.sh b/scripts/cleanupAfterReportGeneration.sh
new file mode 100644
index 000000000..f543e5f6b
--- /dev/null
+++ b/scripts/cleanupAfterReportGeneration.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Cleans up after report generation. This includes deleting empty report files and, if no files are left, deleting the report directory.
+
+# Fail on any error ("-e" = exit on first error, "-o pipefail" exits on errors within piped commands)
+set -o errexit -o pipefail
+
+# Read the first input argument containing the report directory
+if [ "$#" -ne 1 ]; then
+ echo "cleanupReports: Usage: $0 " >&2
+fi
+
+# Check the first input argument to be a valid directory
+if [ ! -d "$1" ] ; then
+ echo "cleanupReports: $1 directory not found" >&2
+ exit 1
+fi
+
+report_directory=$1
+echo "cleanupReports: report_directory=${report_directory}"
+
+# Find all comma separated values (CSV) files in the report directory
+# and delete the ones that contain only one line (header) or less.
+find "${report_directory}" -type f -name "*.csv" | sort | while read -r report_file; do
+ number_of_lines=$(wc -l < "${report_file}" | awk '{print $1}')
+ if [[ "${number_of_lines}" -le 1 ]]; then
+ echo "cleanupReports: deleting empty (${number_of_lines} lines) report file ${report_file}"
+ rm -f "${report_file}"
+ fi
+done
+
+# Delete the reports directory if it is empty
+number_files_in_report_directory=$( find "${report_directory}" -type f | wc -l | awk '{print $1}' )
+if [[ "${number_files_in_report_directory}" -lt 1 ]]; then
+ echo "cleanupReports: deleting empty (${number_files_in_report_directory} files) directory ${report_directory}"
+ rm -rf "${report_directory}"
+fi
\ No newline at end of file
diff --git a/scripts/configuration/template-neo4jv5-jqassistant.yaml b/scripts/configuration/template-neo4jv5-jqassistant.yaml
index bf2b5d243..92182b760 100644
--- a/scripts/configuration/template-neo4jv5-jqassistant.yaml
+++ b/scripts/configuration/template-neo4jv5-jqassistant.yaml
@@ -26,6 +26,10 @@ jqassistant:
# version:
# classifier:
# type:
+ plugins:
+ - group-id: org.jqassistant.plugin.typescript
+ artifact-id: jqassistant-typescript-plugin
+ version: 1.0.0-RC1
# The store configuration
store:
diff --git a/scripts/downloader/downloadReactRouter.sh b/scripts/downloader/downloadReactRouter.sh
new file mode 100755
index 000000000..a8d6e2c6d
--- /dev/null
+++ b/scripts/downloader/downloadReactRouter.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+# Downloads react-router (https://github.com/remix-run/react-router) from GitHub using git clone.
+# The source files are written into the "source" directory of the current analysis directory.
+# After scanning it with jQAssistant Typescript Plugin the resulting JSON will be moved into the "artifacts" directory.
+
+# Note: The #-framed blocks are those that are specific to this download.
+# The other parts of the script can be reused/copied as a reference to write other download scripts.
+
+# Note: This script is meant to be started within the temporary analysis directory (e.g. "temp/AnalysisName/")
+
+# Requires downloadMavenArtifact.sh
+
+# Get the analysis name from the middle part of the current file name (without prefix "download" and without extension)
+SCRIPT_FILE_NAME="$(basename -- "${BASH_SOURCE[0]}")"
+SCRIPT_FILE_NAME_WITHOUT_EXTENSION="${SCRIPT_FILE_NAME%%.*}"
+SCRIPT_FILE_NAME_WITHOUT_PREFIX_AND_EXTENSION="${SCRIPT_FILE_NAME_WITHOUT_EXTENSION##download}"
+ANALYSIS_NAME="${SCRIPT_FILE_NAME_WITHOUT_PREFIX_AND_EXTENSION}"
+
+echo "download${ANALYSIS_NAME}: SCRIPT_FILE_NAME=${SCRIPT_FILE_NAME}"
+echo "download${ANALYSIS_NAME}: SCRIPT_FILE_NAME_WITHOUT_EXTENSION=${SCRIPT_FILE_NAME_WITHOUT_EXTENSION}"
+echo "download${ANALYSIS_NAME}: ANALYSIS_NAME=${ANALYSIS_NAME}"
+
+# Read the first input argument containing the version(s) of the artifact(s)
+if [ "$#" -ne 1 ]; then
+ echo "Error (download${ANALYSIS_NAME}): Usage: $0 " >&2
+ exit 1
+fi
+PROJECT_VERSION=$1
+echo "download${ANALYSIS_NAME}: PROJECT_VERSION=${PROJECT_VERSION}"
+
+################################################################
+# Download react-router source files to be analyzed
+################################################################
+git clone https://github.com/remix-run/react-router.git source
+(
+ cd source || exit
+ git checkout "react-router@${PROJECT_VERSION}"
+ yarn install || yarn
+ npx --yes @jqassistant/ts-lce >jqassistant-typescript-scan.log
+)
+mkdir -p artifacts
+mv -nv "source/.reports/jqa/ts-output.json" "artifacts/ts-react-router-${PROJECT_VERSION}.json"
+################################################################
\ No newline at end of file
diff --git a/scripts/executeJupyterNotebookReports.sh b/scripts/executeJupyterNotebookReports.sh
new file mode 100755
index 000000000..fd09d9ea9
--- /dev/null
+++ b/scripts/executeJupyterNotebookReports.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+# Executes the Jupyter Notebook given with the command line option --jupyterNotebook and creates a report directory for the results (ipynb, md, pdf).
+
+# Requires executeJupyterNotebook.sh, cleanupAfterReportGeneration.sh
+
+# Overrideable Constants (defaults also defined in sub scripts)
+REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
+
+# Fail on any error ("-e" = exit on first error, "-o pipefail" exits on errors within piped commands)
+set -o errexit -o pipefail
+
+# Function to display script usage
+usage() {
+ echo "Usage: $0 --jupyterNotebook nameOfTheJupyterNotebook [--reportName nameOfTheReportsDirectory]"
+ echo "Example: $0 --jupyterNotebook ArtifactDependencies.ipynb"
+ exit 1
+}
+
+camel_to_kebab_case_file_name() {
+ basename "${1%.*}" | sed -r 's/([a-z0-9])([A-Z])/\1-\2/g' | tr '[:upper:]' '[:lower:]'
+}
+
+# Default values
+reportName=""
+jupyterNotebook=""
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+ commandLineOption="${1}"
+ case ${commandLineOption} in
+ --jupyterNotebook)
+ jupyterNotebook="${2}"
+ shift
+ ;;
+ --reportName)
+ reportName="${2}"
+ shift
+ ;;
+
+ *)
+ echo "executeJupyterNotebookReports: Error: Unknown option: ${commandLineOption}"
+ usage
+ ;;
+ esac
+ shift
+done
+
+if [[ -z ${jupyterNotebook} ]]; then
+ echo "${USAGE}"
+ exit 1
+fi
+
+if [[ -z ${reportName} ]]; then
+ reportName=$(camel_to_kebab_case_file_name "${jupyterNotebook}")
+ echo "executeJupyterNotebookReports: reportName defaults to ${reportName}"
+fi
+
+## Get this "scripts" directory if not already set
+# Even though $BASH_SOURCE is made for Bourne-like shells, it is also supported by others and is therefore the preferred solution here.
+# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
+# This way non-standard tools like readlink aren't needed.
+SCRIPTS_DIR=${SCRIPTS_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )} # Repository directory containing the shell scripts
+echo "executeJupyterNotebookReports: SCRIPTS_DIR=${SCRIPTS_DIR}"
+
+# Get the "scripts" directory by taking the path of this script and going one directory up.
+REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-"${SCRIPTS_DIR}/reports"} # Repository directory containing the report scripts
+echo "executeJupyterNotebookReports: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
+
+# Get the "jupyter" directory by taking the path of this script and going two directory up and then to "jupyter".
+JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
+echo "executeJupyterNotebookReports: JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY}"
+
+# Create report directory
+FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${reportName}"
+mkdir -p "${FULL_REPORT_DIRECTORY}"
+
+# Execute and convert the given Jupyter Notebook within the given reports directory
+(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/${jupyterNotebook}")
+
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
\ No newline at end of file
diff --git a/scripts/executeQueryFunctions.sh b/scripts/executeQueryFunctions.sh
index 9989318f8..f49f1e58e 100644
--- a/scripts/executeQueryFunctions.sh
+++ b/scripts/executeQueryFunctions.sh
@@ -46,31 +46,88 @@ execute_cypher_expect_results() {
execute_cypher_http_expect_results "${@}" # "${@}": Get all function arguments and forward them
}
+# Function to execute all Cypher queries in the given filenames and return the first non-empty result with the default method.
+execute_cypher_queries_until_results() {
+ execute_cypher_http_queries_until_results "${@}" # "${@}": Get all function arguments and forward them
+}
+
# Function to execute a cypher query from the given file (first and only argument) using Neo4j's HTTP API
execute_cypher_http() {
# (Neo4j HTTP API Script) Execute the Cypher query contained in the file and print the results as CSV
source "${SCRIPTS_DIR}/executeQuery.sh" "${@}" # "${@}": Get all function arguments and forward them
}
-# Function to execute a cypher query from the given file (first and only argument) with a summarized (console) output using Neo4j's HTTP API
-execute_cypher_http_summarized() {
- results=$( execute_cypher_http "${@}" | wc -l ) # "${@}": Get all function arguments and forward them
- results=$((results - 2))
+# Function to execute a cypher query from the given file (first and only argument)
+# and return the number of result lines using Neo4j's HTTP API
+execute_cypher_http_number_of_lines_in_result() {
+ results=$( execute_cypher_http "${@}" | wc -l | awk '{print $1}' ) # "${@}"= Get all function arguments and forward them
+ results=$((results - 1))
+ echo "${results}"
+}
+
+# Function to execute a cypher query from the given file (first and only argument)
+# with a summarized (console) output using Neo4j's HTTP API
+execute_cypher_http_summarized() {
+ cypherFileName="${1}" # Get the Cypher file name from the first argument
+ results=$( execute_cypher_http_number_of_lines_in_result "${@}" ) # "${@}"= Get all function arguments and forward them
echo "$(basename -- "${cypherFileName}") (via http) result lines: ${results}"
}
-# Function to execute a cypher query from the given file (first and only argument) that fails on no result using Neo4j's HTTP API
+# Function to execute a cypher query from the given file (first and only argument)
+# that fails on no result using Neo4j's HTTP API
execute_cypher_http_expect_results() {
- # Get the Cypher file name from the first argument
- cypherFileName="${1}"
- results=$( execute_cypher_http "${cypherFileName}" | wc -l )
- results=$((results - 1))
- if [[ "$results" -lt 1 ]]; then
+ cypherFileName="${1}" # Get the Cypher file name from the first argument
+ results=$( execute_cypher_http_number_of_lines_in_result "${@}" ) # "${@}"= Get all function arguments and forward them
+ if [[ "${results}" -lt 1 ]]; then
echo "$(basename -- "${cypherFileName}") (via http) Error: Expected at least one entry but was ${results}" >&2
exit 1
fi
}
+# Executes all Cypher queries in the given filenames and returns the first non-empty result.
+# If all queries lead to an empty result then the last (empty) result is returned.
+# Takes one or more filenames as first arguments followed by optional query parameters (key=value).
+execute_cypher_http_queries_until_results() {
+ local cypherFileNames=""
+
+ while [[ $# -gt 0 ]]; do
+ arg="${1}" # Get the value of the current argument
+
+ if [ "${arg#*"="}" == "${arg}" ]; then
+ # The argument doesn't contain an equal sign and
+ # is therefore considered to be a filename (first arguments).
+ cypherFileNames+="\n${arg}"
+ else
+ # The argument contains an equal sign and is therefore the first query parameter.
+ # Keep the argument pointer unchanged (no shift) to use ${@} for all remaining arguments.
+ break;
+ fi
+ shift # iterate to the next argument
+ done
+ cypherFileNames="${cypherFileNames#'\n'}" # remove the leading new line character
+
+ # echo -e "debug execute_cypher_http_queries_until_results: ------------------"
+ # echo -e "debug execute_cypher_http_queries_until_results: cypherFileNames=${cypherFileNames}"
+ # echo -e "debug execute_cypher_http_queries_until_results: additional arguments=${*}"
+ # echo -e "debug execute_cypher_http_queries_until_results: ------------------"
+
+ echo -e "${cypherFileNames}" | while read -r cypherFileName; do
+ # echo "debug execute_cypher_until_results: execute cypherFileName=${cypherFileName}"
+ results=$( execute_cypher_http "${cypherFileName}" "${@}" )
+ # echo "debug execute_cypher_http_queries_until_results: results=${results}"
+
+ resultsCount=$(echo "${results}" | wc -l)
+ resultsCount=$((resultsCount - 1))
+ # echo "debug execute_cypher_http_queries_until_results: resultsCount=${resultsCount}"
+
+ if [[ "${resultsCount}" -gt 0 ]]; then
+ # Return the results when they aren't empty.
+ echo -en "${results}"
+ break;
+ fi
+ done
+}
+
cypher_shell_query_parameters() {
query_parameters=""
shift # ignore first argument containing the query file name
@@ -114,25 +171,72 @@ execute_cypher_shell() {
echo "\"Source Cypher File:\",\"$(basename -- "${cypherFileName}")\""
}
-# Function to execute a cypher query from the given file (first and only argument) with a summarized (console) output using "cypher-shell" provided by Neo4j
-execute_cypher_shell_summarized() {
- # Get the Cypher file name from the first argument
- cypherFileName="${1}"
+# Function to execute a cypher query from the given file (first and only argument)
+# and returning number of resulting lines using "cypher-shell" provided by Neo4j
+execute_cypher_shell_number_of_lines_in_result() {
+ results=$( execute_cypher_http "${@}" | wc -l | awk '{print $1}' ) # "${@}"= Get all function arguments and forward them
+ results=$((results - 1))
+ echo "${results}"
+}
- results=$( execute_cypher_shell ${cypherFileName} | wc -l )
- results=$((results - 2))
+# Function to execute a cypher query from the given file (first and only argument)
+# with a summarized (console) output using "cypher-shell" provided by Neo4j
+execute_cypher_shell_summarized() {
+ cypherFileName="${1}" # Get the Cypher file name from the first argument
+ results=$( execute_cypher_shell_number_of_lines_in_result "${@}" ) # "${@}"= Get all function arguments and forward them
echo "$(basename -- "${cypherFileName}") (via cypher-shell) result lines: ${results}"
}
# Function to execute a cypher query from the given file (first and only argument) that fails on no result using "cypher-shell" provided by Neo4j
execute_cypher_shell_expect_results() {
- # Get the Cypher file name from the first argument
- cypherFileName="${1}"
-
- results=$( execute_cypher_shell ${cypherFileName} | wc -l )
- results=$((results - 2))
- if [[ "$results" -lt 1 ]]; then
+ cypherFileName="${1}" # Get the Cypher file name from the first argument
+ results=$( execute_cypher_shell_number_of_lines_in_result "${@}" ) # "${@}"= Get all function arguments and forward them
+ if [[ "${results}" -lt 1 ]]; then
echo "$(basename -- "${cypherFileName}") (via cypher-shell) Error: Expected at least one entry but was ${results}" >&2
exit 1
fi
+}
+
+# Executes all Cypher queries in the given filenames and returns the first non-empty result using "cypher-shell" provided by Neo4j.
+# If all queries lead to an empty result then the last (empty) result is returned.
+# Takes one or more filenames as first arguments followed by optional query parameters (key=value).
+execute_cypher_shell_queries_until_results() {
+ local cypherFileNames=""
+
+ while [[ $# -gt 0 ]]; do
+ arg="${1}" # Get the value of the current argument
+
+ if [ "${arg#*"="}" == "${arg}" ]; then
+ # The argument doesn't contain an equal sign and
+ # is therefore considered to be a filename (first arguments).
+ cypherFileNames+="\n${arg}"
+ else
+ # The argument contains an equal sign and is therefore the first query parameter.
+ # Keep the argument pointer unchanged (no shift) to use ${@} for all remaining arguments.
+ break;
+ fi
+ shift # iterate to the next argument
+ done
+ cypherFileNames="${cypherFileNames#'\n'}" # remove the leading new line character
+
+ # echo -e "debug execute_cypher_shell_queries_until_results: ------------------"
+ # echo -e "debug execute_cypher_shell_queries_until_results: cypherFileNames=${cypherFileNames}"
+ # echo -e "debug execute_cypher_shell_queries_until_results: additional arguments=${*}"
+ # echo -e "debug execute_cypher_shell_queries_until_results: ------------------"
+
+ echo -e "${cypherFileNames}" | while read -r cypherFileName; do
+ # echo "debug execute_cypher_until_results: execute cypherFileName=${cypherFileName}"
+ results=$( execute_cypher_shell "${cypherFileName}" "${@}" )
+ # echo "debug execute_cypher_shell_queries_until_results: results=${results}"
+
+ resultsCount=$(echo "${results}" | wc -l)
+ resultsCount=$((resultsCount - 1))
+ # echo "debug execute_cypher_shell_queries_until_results: resultsCount=${resultsCount}"
+
+ if [[ "${resultsCount}" -gt 0 ]]; then
+ # Return the results when they aren't empty.
+ echo -en "${results}"
+ break;
+ fi
+ done
}
\ No newline at end of file
diff --git a/scripts/findTypescriptDataFiles.sh b/scripts/findTypescriptDataFiles.sh
new file mode 100755
index 000000000..b9a2d8e61
--- /dev/null
+++ b/scripts/findTypescriptDataFiles.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+# Echoes a list of Typescript data files starting with "ts-" and having the extension "json" in the artifacts directory and its sub directories.
+# Each name will be prefixed by "typescript:project::" and separated by one space character.
+# This list is meant to be used after the "-f" command line option of the jQAssistant scan command to include Typescript data files.
+
+# Fail on any error ("-e" = exit on first error, "-o pipefail" exits on errors within piped commands)
+set -o errexit -o pipefail
+
+ARTIFACTS_DIRECTORY=${ARTIFACTS_DIRECTORY:-"artifacts"}
+
+# Check if the artifacts directory exists
+if [ ! -d "./${ARTIFACTS_DIRECTORY}" ] ; then
+ echo "" # The artifact directory doesn't exist. There is no file at all.
+ exit 0
+fi
+
+find "./${ARTIFACTS_DIRECTORY}" -type f -name 'ts-*.json' -exec echo {} \; | sed 's/^/typescript:project::/' | tr '\n' ' '
\ No newline at end of file
diff --git a/scripts/prepareAnalysis.sh b/scripts/prepareAnalysis.sh
index ff8a67f30..79996946a 100644
--- a/scripts/prepareAnalysis.sh
+++ b/scripts/prepareAnalysis.sh
@@ -31,11 +31,12 @@ source "${SCRIPTS_DIR}/executeQueryFunctions.sh"
source "${SCRIPTS_DIR}/parseCsvFunctions.sh"
# Local Constants
-PACKAGE_WEIGHTS_CYPHER_DIR="$CYPHER_DIR/Package_Relationship_Weights"
-PACKAGE_METRICS_CYPHER_DIR="$CYPHER_DIR/Metrics"
+DEPENDS_ON_CYPHER_DIR="$CYPHER_DIR/DependsOn_Relationship_Weights"
+METRICS_CYPHER_DIR="$CYPHER_DIR/Metrics"
EXTERNAL_DEPENDENCIES_CYPHER_DIR="$CYPHER_DIR/External_Dependencies"
ARTIFACT_DEPENDENCIES_CYPHER_DIR="$CYPHER_DIR/Artifact_Dependencies"
TYPES_CYPHER_DIR="$CYPHER_DIR/Types"
+TYPESCRIPT_CYPHER_DIR="$CYPHER_DIR/Typescript_Enrichment"
# Preparation - Data verification: DEPENDS_ON releationships
dataVerificationResult=$( execute_cypher "${CYPHER_DIR}/Data_verification_DEPENDS_ON_relationships.cypher" "${@}")
@@ -45,21 +46,38 @@ if ! is_csv_column_greater_zero "${dataVerificationResult}" "sourceNodeCount"; t
fi
# Preparation - Create indices
-execute_cypher "${CYPHER_DIR}/Create_index_for_full_qualified_type_name.cypher"
+execute_cypher "${CYPHER_DIR}/Create_Java_Type_index_for_full_qualified_name.cypher"
+execute_cypher "${CYPHER_DIR}/Create_Typescript_index_for_full_qualified_name.cypher"
-# Preparation - Create DEPENDS_ON for every DEPENDS_ON_PACKAGE relationship
-execute_cypher_expect_results "${CYPHER_DIR}/Create_a_DEPENDS_ON_relationship_for_every_DEPENDS_ON_PACKAGE.cypher"
-execute_cypher_expect_results "${CYPHER_DIR}/Create_a_DEPENDS_ON_relationship_for_every_DEPENDS_ON_ARTIFACT.cypher"
+# Preparation - Create DEPENDS_ON for every DEPENDS_ON_* relationship
+# Workaround for https://github.com/jQAssistant/jqa-java-plugin/issues/44
+# execute_cypher "${CYPHER_DIR}/Create_a_DEPENDS_ON_relationship_for_every_DEPENDS_ON_PACKAGE.cypher"
+# execute_cypher "${CYPHER_DIR}/Create_a_DEPENDS_ON_relationship_for_every_DEPENDS_ON_ARTIFACT.cypher"
-# Preparation - Add weights to package DEPENDS_ON relationships
-execute_cypher_expect_results "${PACKAGE_WEIGHTS_CYPHER_DIR}/Add_weight_property_for_Interface_Dependencies_to_Package_DEPENDS_ON_Relationship.cypher"
-execute_cypher_expect_results "${PACKAGE_WEIGHTS_CYPHER_DIR}/Add_weight_property_to_Package_DEPENDS_ON_Relationship.cypher"
-execute_cypher_expect_results "${PACKAGE_WEIGHTS_CYPHER_DIR}/Add_weight25PercentInterfaces_to_Package_DEPENDS_ON_relationships.cypher"
-execute_cypher_expect_results "${PACKAGE_WEIGHTS_CYPHER_DIR}/Add_weight10PercentInterfaces_to_Package_DEPENDS_ON_relationships.cypher"
+# Preparation - Enrich Graph for Typescript by adding "module" and "name" properties
+execute_cypher "${TYPESCRIPT_CYPHER_DIR}/Add_name_and_module_properties.cypher"
-# Preparation - Add Package node properties "incomingDependencies" and "outgoingDependencies"
-execute_cypher_expect_results "${PACKAGE_METRICS_CYPHER_DIR}/Set_Incoming_Package_Dependencies.cypher"
-execute_cypher_expect_results "${PACKAGE_METRICS_CYPHER_DIR}/Set_Outgoing_Package_Dependencies.cypher"
+# Preparation - Enrich Graph for Typescript by adding relationships between Modules with the same globalFqn
+execute_cypher "${TYPESCRIPT_CYPHER_DIR}/Add_RESOLVES_TO_relationship_for_matching_modules.cypher"
+execute_cypher "${TYPESCRIPT_CYPHER_DIR}/Add_RESOLVES_TO_relationship_for_matching_declarations.cypher"
+execute_cypher "${TYPESCRIPT_CYPHER_DIR}/Add_DEPENDS_ON_relationship_to_resolved_modules.cypher"
+
+# Preparation - Add weights to Java Package DEPENDS_ON relationships
+execute_cypher "${DEPENDS_ON_CYPHER_DIR}/Add_weight_property_for_Java_Interface_Dependencies_to_Package_DEPENDS_ON_Relationship.cypher"
+execute_cypher "${DEPENDS_ON_CYPHER_DIR}/Add_weight_property_to_Java_Package_DEPENDS_ON_Relationship.cypher"
+execute_cypher "${DEPENDS_ON_CYPHER_DIR}/Add_weight25PercentInterfaces_to_Java_Package_DEPENDS_ON_relationships.cypher"
+execute_cypher "${DEPENDS_ON_CYPHER_DIR}/Add_weight10PercentInterfaces_to_Java_Package_DEPENDS_ON_relationships.cypher"
+
+# Preparation - Add weights to Typescript Module DEPENDS_ON relationships
+execute_cypher "${DEPENDS_ON_CYPHER_DIR}/Add_fine_grained_weights_for_Typescript_module_dependencies.cypher"
+
+# Preparation - Add Typescript Module node properties "incomingDependencies" and "outgoingDependencies"
+execute_cypher "${METRICS_CYPHER_DIR}/Set_Incoming_Typescript_Module_Dependencies.cypher"
+execute_cypher "${METRICS_CYPHER_DIR}/Set_Outgoing_Typescript_Module_Dependencies.cypher"
+
+# Preparation - Add Java Package node properties "incomingDependencies" and "outgoingDependencies"
+execute_cypher "${METRICS_CYPHER_DIR}/Set_Incoming_Java_Package_Dependencies.cypher"
+execute_cypher "${METRICS_CYPHER_DIR}/Set_Outgoing_Java_Package_Dependencies.cypher"
# Preparation - Label external types and annotations
# "external" means that there is no byte code available, not a primitive type and not a java type
@@ -72,12 +90,12 @@ execute_cypher "${TYPES_CYPHER_DIR}/Label_resolved_duplicate_types.cypher"
execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/Remove_external_type_and_annotation_labels.cypher"
execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/Label_external_types_and_annotations.cypher"
-# Preparation - Add Artifact node properties "incomingDependencies" and "outgoingDependencies"
-execute_cypher_expect_results "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Incoming_Artifact_Dependencies.cypher"
-execute_cypher_expect_results "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Outgoing_Artifact_Dependencies.cypher"
+# Preparation - Add Java Artifact node properties "incomingDependencies" and "outgoingDependencies"
+execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Incoming_Java_Artifact_Dependencies.cypher"
+execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Outgoing_Java_Artifact_Dependencies.cypher"
-# Preparation - Add Type node properties "incomingDependencies" and "outgoingDependencies"
-execute_cypher_expect_results "${PACKAGE_METRICS_CYPHER_DIR}/Set_Incoming_Type_Dependencies.cypher"
-execute_cypher_expect_results "${PACKAGE_METRICS_CYPHER_DIR}/Set_Outgoing_Type_Dependencies.cypher"
+# Preparation - Add Java Type node properties "incomingDependencies" and "outgoingDependencies"
+execute_cypher "${METRICS_CYPHER_DIR}/Set_Incoming_Java_Type_Dependencies.cypher"
+execute_cypher "${METRICS_CYPHER_DIR}/Set_Outgoing_Java_Type_Dependencies.cypher"
echo "prepareAnalysis: Preparation successful"
\ No newline at end of file
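
The data-verification step above relies on is_csv_column_greater_zero from parseCsvFunctions.sh, which is not part of this change set. A minimal sketch of how such a check could look, assuming execute_cypher returns CSV text with a header row followed by data rows (only the function name and call convention are taken from the scripts; the body below is an assumption):

    # Hypothetical sketch: succeed (return 0) if the named column of the first
    # data row in a CSV result string holds a number greater than zero.
    is_csv_column_greater_zero() {
        local csv_result="${1}"   # CSV text, e.g. the output of execute_cypher
        local column_name="${2}"  # e.g. "sourceNodeCount" or "relationshipCount"
        echo "${csv_result}" | awk -F',' -v column="${column_name}" '
            NR == 1 { for (i = 1; i <= NF; i++) { name = $i; gsub(/"/, "", name); if (name == column) col = i } }
            NR == 2 && col { value = $col; gsub(/"/, "", value); found = (value + 0 > 0) }
            END { exit found ? 0 : 1 }
        '
    }

prepareAnalysis.sh and the projection functions below use this kind of check to decide whether to continue or to skip an analysis step.
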
diff --git a/scripts/projectionFunctions.sh b/scripts/projectionFunctions.sh
new file mode 100644
index 000000000..e6b1ee322
--- /dev/null
+++ b/scripts/projectionFunctions.sh
@@ -0,0 +1,148 @@
+#!/usr/bin/env bash
+
+# Provides functions to create and delete Graph Projections for Neo4j Graph Data Science.
+# A Projection contains only selected nodes, relationships and properties of the main Graph
+# and is stored using compressed data structures for optimized in-memory processing.
+# By selecting only one Node and Relationship type, algorithms for homogeneous Graphs can be applied
+# that wouldn't lead to usable results on the whole heterogeneous main Graph.
+
+# References:
+# - Native Projection: https://neo4j.com/docs/graph-data-science/current/management-ops/graph-creation/graph-project
+# - Graph management: https://neo4j.com/docs/graph-data-science/current/management-ops
+
+# Requires executeQueryFunctions.sh, parseCsvFunctions.sh
+
+# Fail on any error ("-e" = exit on first error, "-o pipefail" = exit on errors within piped commands)
+set -o errexit -o pipefail
+
+## Get this "scripts" directory if not already set
+# Even though $BASH_SOURCE is made for Bourne-like shells, it is also supported by others and is therefore the preferred solution here.
+# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
+# This way non-standard tools like readlink aren't needed.
+SCRIPTS_DIR=${SCRIPTS_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )} # Repository directory containing the shell scripts
+
+# Get the "cypher" directory by taking the path of this script and going up one directory and then to "cypher".
+CYPHER_DIR=${CYPHER_DIR:-"${SCRIPTS_DIR}/../cypher"}
+
+# Get the directory within the "cypher" directory that contains the Cypher queries for projection management.
+PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
+echo "projectionFunctions: PROJECTION_CYPHER_DIR=${PROJECTION_CYPHER_DIR}"
+
+# Define functions to execute a cypher query from within the given file (first and only argument)
+source "${SCRIPTS_DIR}/executeQueryFunctions.sh"
+
+# Define function(s) (e.g. is_csv_column_greater_zero) to parse CSV format strings from Cypher query results.
+source "${SCRIPTS_DIR}/parseCsvFunctions.sh"
+
+# Creates a directed Graph projection for dependencies between nodes specified by the parameter "dependencies_projection_node".
+# Nodes without incoming and outgoing dependencies will be filtered out using a subgraph.
+#
+# Returns true (=0) if the projection has been created successfully.
+# Returns false (=1) if the projection couldn't be created because of missing data.
+# Exits with an error if there are technical issues.
+#
+# Required Parameters:
+# - dependencies_projection=...
+# Name prefix for the in-memory projection name for dependencies. Example: "type-centrality"
+# - dependencies_projection_node=...
+# Label of the nodes that will be used for the projection. Example: "Type"
+# - dependencies_projection_weight_property=...
+# Name of the node property that contains the dependency weight. Example: "weight"
+createDirectedDependencyProjection() {
+ local projectionResult
+
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_0_Check_Projectable.cypher" "${@}"
+ projectionCheckResult=$( execute_cypher_http_number_of_lines_in_result "${PROJECTION_CYPHER_DIR}/Dependencies_0_Check_Projectable.cypher" "${@}" )
+ if [[ "${projectionCheckResult}" -lt 1 ]]; then
+ return 1
+ fi
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3_Create_Projection.cypher" "${@}"
+ projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}")
+ is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
+}
+
+# Creates an undirected Graph projection for dependencies between nodes specified by the parameter "dependencies_projection_node".
+# Nodes without incoming and outgoing dependencies will be filtered out using a subgraph.
+#
+# Returns true (=0) if the projection has been created successfully.
+# Returns false (=1) if the projection couldn't be created because of missing data.
+# Exits with an error if there are technical issues.
+#
+# Required Parameters:
+# - dependencies_projection=...
+# Name prefix for the in-memory projection name for dependencies. Example: "type-centrality"
+# - dependencies_projection_node=...
+# Label of the nodes that will be used for the projection. Example: "Type"
+# - dependencies_projection_weight_property=...
+# Name of the node property that contains the dependency weight. Example: "weight"
+createUndirectedDependencyProjection() {
+ local projectionResult
+
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_0_Check_Projectable.cypher" "${@}"
+ projectionCheckResult=$( execute_cypher_http_number_of_lines_in_result "${PROJECTION_CYPHER_DIR}/Dependencies_0_Check_Projectable.cypher" "${@}" )
+ if [[ "${projectionCheckResult}" -lt 1 ]]; then
+ return 1
+ fi
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_4_Create_Undirected_Projection.cypher" "${@}"
+ projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}")
+ is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
+}
+
+# Creates a directed Graph projection specialized for Java Type dependencies.
+# Zero-degree nodes, external types, java types and duplicates are filtered out using a Cypher projection.
+#
+# Returns true (=0) if the projection has been created successfully.
+# Returns false (=1) if the projection couldn't be created because of missing data.
+# Exits with an error if there are technical issues.
+#
+# Required Parameters:
+# - dependencies_projection=...
+# Name prefix for the in-memory projection name for dependencies. Example: "package"
+createDirectedJavaTypeDependencyProjection() {
+ local projectionResult
+
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
+ projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3c_Create_Java_Type_Projection.cypher" "${@}")
+ is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
+}
+
+# Creates an undirected Graph projection specialized for Java Type dependencies.
+# Zero-degree nodes, external types, java types and duplicates are filtered out using a Cypher projection.
+#
+# Returns true (=0) if the projection has been created successfully.
+# Returns false (=1) if the projection couldn't be created because of missing data.
+# Exits with an error if there are technical issues.
+#
+# Required Parameters:
+# - dependencies_projection=...
+# Name prefix for the in-memory projection name for dependencies. Example: "package"
+createUndirectedJavaTypeDependencyProjection() {
+ local projectionResult
+
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
+ projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_4c_Create_Undirected_Type_Projection.cypher" "${@}")
+ is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
+}
+
+# Creates a directed Graph projection specialized for Java Method dependencies.
+# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
+#
+# Returns true (=0) if the projection has been created successfully.
+# Returns false (=1) if the projection couldn't be created because of missing data.
+# Exits with an error if there are technical issues.
+#
+# Required Parameters:
+# - dependencies_projection=...
+# Name prefix for the in-memory projection name for dependencies. Example: "package"
+createDirectedJavaMethodDependencyProjection() {
+ local projectionResult
+
+ execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
+ projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3d_Create_Java_Method_Projection.cypher" "${@}")
+ is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
+}
+
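
The report scripts below use this file by sourcing it and passing all projection settings as "name=value" strings, which are forwarded to the parameterized Cypher queries via "${@}". A condensed usage sketch, using the example values from the function documentation above:

    # Source the shared projection functions (SCRIPTS_DIR as defined by the calling script).
    source "${SCRIPTS_DIR}/projectionFunctions.sh"

    # Settings are plain "name=value" strings handed through to the Cypher queries.
    PROJECTION_NAME="dependencies_projection=type-centrality"
    PROJECTION_NODE="dependencies_projection_node=Type"
    PROJECTION_WEIGHT="dependencies_projection_weight_property=weight"

    # The function returns non-zero (without exiting) when there is nothing to project,
    # so callers can skip the dependent algorithms instead of failing.
    if createDirectedDependencyProjection "${PROJECTION_NAME}" "${PROJECTION_NODE}" "${PROJECTION_WEIGHT}"; then
        echo "Projection created. Graph Data Science algorithms can now run on it."
    else
        echo "No data. Analysis skipped."
    fi
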
diff --git a/scripts/reports/ArtifactDependenciesCsv.sh b/scripts/reports/ArtifactDependenciesCsv.sh
index dfcd907ad..3e38b9e83 100755
--- a/scripts/reports/ArtifactDependenciesCsv.sh
+++ b/scripts/reports/ArtifactDependenciesCsv.sh
@@ -3,7 +3,7 @@
# Executes "Artifact_Dependencies" Cypher queries to get the "artifact-dependencies-csv" CSV reports.
# It contains lists of dependencies across artifacts and how many packages/types they are used by.
-# Requires executeQueryFunctions.sh
+# Requires executeQueryFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" = exit on errors within piped commands)
set -o errexit -o pipefail
@@ -38,11 +38,14 @@ mkdir -p "${FULL_REPORT_DIRECTORY}"
ARTIFACT_DEPENDENCIES_CYPHER_DIR="${CYPHER_DIR}/Artifact_Dependencies"
# Preparation: Set number of packages and types per artifact
-execute_cypher_expect_results "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Set_number_of_packages_and_types_on_artifacts.cypher"
+execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Set_number_of_Java_packages_and_types_on_artifacts.cypher"
execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Most_used_internal_dependencies_acreoss_artifacts.cypher" > "${FULL_REPORT_DIRECTORY}/MostUsedDependenciesAcrossArtifacts.csv"
execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Artifacts_with_dependencies_to_other_artifacts.cypher" > "${FULL_REPORT_DIRECTORY}/DependenciesAcrossArtifacts.csv"
execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Artifacts_with_duplicate_packages.cypher" > "${FULL_REPORT_DIRECTORY}/DuplicatePackageNamesAcrossArtifacts.csv"
execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Usage_and_spread_of_internal_artifact_dependencies.cypher" > "${FULL_REPORT_DIRECTORY}/InternalArtifactUsageSpreadPerDependency.csv"
-execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Usage_and_spread_of_internal_artifact_dependents.cypher" > "${FULL_REPORT_DIRECTORY}/InternalArtifactUsageSpreadPerDependent.csv"
\ No newline at end of file
+execute_cypher "${ARTIFACT_DEPENDENCIES_CYPHER_DIR}/Usage_and_spread_of_internal_artifact_dependents.cypher" > "${FULL_REPORT_DIRECTORY}/InternalArtifactUsageSpreadPerDependent.csv"
+
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
\ No newline at end of file
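
cleanupAfterReportGeneration.sh itself is not included in this change set; only its purpose ("Empty reports will be deleted") and its call convention (sourced with the report directory as first argument) are visible here. A minimal sketch under those assumptions, so the real script may well do more:

    #!/usr/bin/env bash
    # Hypothetical sketch of cleanupAfterReportGeneration.sh: delete empty report files
    # in the directory given as the first argument. It is sourced by the report scripts,
    # so it reuses their shell options (errexit, pipefail).

    REPORT_CLEANUP_DIRECTORY="${1:-${FULL_REPORT_DIRECTORY}}"

    if [ -d "${REPORT_CLEANUP_DIRECTORY}" ]; then
        # Remove zero-byte files; header-only CSV reports could be treated the same way.
        find "${REPORT_CLEANUP_DIRECTORY}" -maxdepth 1 -type f -empty -delete
        echo "cleanupAfterReportGeneration: Removed empty reports in ${REPORT_CLEANUP_DIRECTORY}"
    fi
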
diff --git a/scripts/reports/ArtifactDependenciesJupyter.sh b/scripts/reports/ArtifactDependenciesJupyter.sh
deleted file mode 100755
index 57c839e2b..000000000
--- a/scripts/reports/ArtifactDependenciesJupyter.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-
-# Creates the "artifact-dependencies" report (ipynb, md, pdf) based on the Jupyter Notebook "ArtifactDependencies.ipynb".
-# It contains the hierarchical artifact dependencies graph
-
-# Requires executeJupyterNotebook.sh
-
-# Overrideable Constants (defaults also defined in sub scripts)
-REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
-
-# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
-set -o errexit -o pipefail
-
-## Get this "scripts/reports" directory if not already set
-# Even if $BASH_SOURCE is made for Bourne-like shells it is also supported by others and therefore here the preferred solution.
-# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
-# This way non-standard tools like readlink aren't needed.
-REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )}
-echo "ArtifactDependenciesJupyter: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
-
-# Get the "scripts" directory by taking the path of this script and going one directory up.
-SCRIPTS_DIR=${SCRIPTS_DIR:-"${REPORTS_SCRIPT_DIR}/.."} # Repository directory containing the shell scripts
-echo "ArtifactDependenciesJupyter: SCRIPTS_DIR=${SCRIPTS_DIR}"
-
-# Get the "jupyter" directory by taking the path of this script and going two directory up and then to "jupyter".
-JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
-echo "ArtifactDependenciesJupyter: JUPYTER_NOTEBOOK_DIRECTORY=$JUPYTER_NOTEBOOK_DIRECTORY"
-
-# Get the "cypher" directory by taking the path of this script and going two directory up and then to "cypher".
-CYPHER_DIR=${CYPHER_DIR:-"${REPORTS_SCRIPT_DIR}/../../cypher"}
-echo "ArtifactDependenciesJupyter CYPHER_DIR=${CYPHER_DIR}"
-
-# Create report directory
-REPORT_NAME="artifact-dependencies"
-FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
-mkdir -p "${FULL_REPORT_DIRECTORY}"
-
-# Execute and convert the Jupyter Notebook "ArtifactDependencies.ipynb" within the given reports directory
-(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/ArtifactDependencies.ipynb")
\ No newline at end of file
diff --git a/scripts/reports/CentralityCsv.sh b/scripts/reports/CentralityCsv.sh
index cbd78c607..9adfbed55 100755
--- a/scripts/reports/CentralityCsv.sh
+++ b/scripts/reports/CentralityCsv.sh
@@ -5,7 +5,7 @@
# The reports (csv files) will be written into the sub directory reports/centrality-csv.
# Note that "scripts/prepareAnalysis.sh" is required to run prior to this script.
-# Requires executeQueryFunctions.sh, parseCsvFunctions.sh
+# Requires executeQueryFunctions.sh, projectionFunctions.sh, cleanupAfterReportGeneration.sh
# Overrideable Constants (defaults also defined in sub scripts)
REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
@@ -31,68 +31,14 @@ echo "centralityCsv: CYPHER_DIR=$CYPHER_DIR"
# Define functions to execute a cypher query from within the given file (first and only argument)
source "${SCRIPTS_DIR}/executeQueryFunctions.sh"
-# Define function(s) (e.g. is_csv_column_greater_zero) to parse CSV format strings from Cypher query results.
-source "${SCRIPTS_DIR}/parseCsvFunctions.sh"
+# Define functions to create and delete Graph Projections like "createDirectedDependencyProjection"
+source "${SCRIPTS_DIR}/projectionFunctions.sh"
# Create report directory
REPORT_NAME="centrality-csv"
FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
mkdir -p "${FULL_REPORT_DIRECTORY}"
-# Centrality preparation for dependencies between Artifacts, Packages and Types.
-# Selects the dependent nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "type-centrality"
-# - dependencies_projection_node=...
-# Label of the nodes that will be used for the projection. Example: "Type"
-# - dependencies_projection_weight_property=...
-# Name of the node property that contains the dependency weight. Example: "weight"
-createDependencyProjection() {
- local PROJECTION_CYPHER_DIR="$CYPHER_DIR/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3_Create_Projection.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
-# Centrality preparation for Type nodes
-# Selects the Type nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-createTypeProjection() {
- local PROJECTION_CYPHER_DIR="$CYPHER_DIR/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3c_Create_Type_Projection.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
-# Centrality preparation for method calls
-# Selects the method nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-createMethodProjection() {
- local PROJECTION_CYPHER_DIR="$CYPHER_DIR/Method_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Methods_1_Delete_Projection.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Methods_2_Create_Projection.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
# Apply the centrality algorithm "Page Rank".
#
# Required Parameters:
@@ -397,7 +343,7 @@ ARTIFACT_WEIGHT="dependencies_projection_weight_property=weight"
# Artifact Centrality
echo "centralityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing artifact dependencies..."
-if createDependencyProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
+if createDirectedDependencyProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
runCentralityAlgorithms "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"
else
echo "centralityCsv: No data. Artifacts analysis skipped."
@@ -411,7 +357,7 @@ PACKAGE_WEIGHT="dependencies_projection_weight_property=weight25PercentInterface
# Package Centrality
echo "centralityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing package dependencies..."
-if createDependencyProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
+if createDirectedDependencyProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
runCentralityAlgorithms "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"
else
echo "centralityCsv: No data. Package analysis skipped."
@@ -425,7 +371,7 @@ TYPE_WEIGHT="dependencies_projection_weight_property=weight"
# Type Centrality
echo "centralityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing type dependencies..."
-if createTypeProjection "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}"; then
+if createDirectedJavaTypeDependencyProjection "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}"; then
runCentralityAlgorithms "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}"
else
echo "centralityCsv: No data. Type analysis skipped."
@@ -439,11 +385,14 @@ METHOD_WEIGHT="dependencies_projection_weight_property="
# Method Centrality
echo "centralityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing method dependencies..."
-if createMethodProjection "${METHOD_PROJECTION}"; then
+if createDirectedJavaMethodDependencyProjection "${METHOD_PROJECTION}"; then
runCentralityAlgorithms "${METHOD_PROJECTION}" "${METHOD_NODE}" "${METHOD_WEIGHT}"
else
echo "centralityCsv: No data. Method analysis skipped."
fi
# ---------------------------------------------------------------
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
+
echo "centralityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Successfully finished."
\ No newline at end of file
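
As stated in the header comment, scripts/prepareAnalysis.sh has to run before this report script, and REPORTS_DIRECTORY is an overrideable constant that defaults to "reports". A possible invocation from the analysis working directory, assuming the Neo4j connection for the query functions is already configured (paths are illustrative):

    # Run the shared preparation once: indices, DEPENDS_ON weights and dependency properties.
    ./scripts/prepareAnalysis.sh

    # Generate the centrality CSV reports into "reports/centrality-csv" below the chosen base directory.
    REPORTS_DIRECTORY="reports" ./scripts/reports/CentralityCsv.sh
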
diff --git a/scripts/reports/CommunityCsv.sh b/scripts/reports/CommunityCsv.sh
index 3f83c1320..dfbf84529 100755
--- a/scripts/reports/CommunityCsv.sh
+++ b/scripts/reports/CommunityCsv.sh
@@ -6,7 +6,7 @@
# Note that "scripts/prepareAnalysis.sh" is required to run prior to this script.
-# Requires executeQueryFunctions.sh, parseCsvFunctions.sh
+# Requires executeQueryFunctions.sh, projectionFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
set -o errexit -o pipefail
@@ -32,52 +32,14 @@ echo "communityCsv: CYPHER_DIR=${CYPHER_DIR}"
# Define functions to execute a cypher query from within the given file (first and only argument)
source "${SCRIPTS_DIR}/executeQueryFunctions.sh"
-# Define function(s) (e.g. is_csv_column_greater_zero) to parse CSV format strings from Cypher query results.
-source "${SCRIPTS_DIR}/parseCsvFunctions.sh"
+# Define functions to create and delete Graph Projections like "createUndirectedDependencyProjection"
+source "${SCRIPTS_DIR}/projectionFunctions.sh"
# Create report directory
REPORT_NAME="community-csv"
FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
mkdir -p "${FULL_REPORT_DIRECTORY}"
-# Community Detection Preparation
-# Selects the nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-# - dependencies_projection_node=...
-# Label of the nodes that will be used for the projection. Example: "Package"
-# - dependencies_projection_weight_property=...
-# Name of the node property that contains the dependency weight. Example: "weight"
-createProjection() {
- local PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_4_Create_Undirected_Projection.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
-# Community Detection Preparation for Types
-# Selects the Type nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-createTypeProjection() {
- local PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_4c_Create_Undirected_Type_Projection.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
# Community Detection using the Louvain Algorithm
#
# Required Parameters:
@@ -418,7 +380,7 @@ ARTIFACT_KCUT="dependencies_maxkcut=5" # default = 2
# Artifact Community Detection
echo "communityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing artifact dependencies..."
-if createProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
+if createUndirectedDependencyProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
detectCommunities "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}" "${ARTIFACT_GAMMA}" "${ARTIFACT_KCUT}"
writeLeidenModularity "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"
else
@@ -435,7 +397,7 @@ PACKAGE_KCUT="dependencies_maxkcut=20" # default = 2
# Package Community Detection
echo "communityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z'): Processing package dependencies..."
-if createProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
+if createUndirectedDependencyProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
detectCommunities "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}" "${PACKAGE_GAMMA}" "${PACKAGE_KCUT}"
writeLeidenModularity "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"
@@ -455,7 +417,7 @@ TYPE_KCUT="dependencies_maxkcut=100" # default = 2
# Type Community Detection
echo "communityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing type dependencies..."
-if createTypeProjection "${TYPE_PROJECTION}"; then
+if createUndirectedJavaTypeDependencyProjection "${TYPE_PROJECTION}"; then
detectCommunities "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}" "${TYPE_GAMMA}" "${TYPE_KCUT}"
# Type Community Detection - Special CSV Queries after update
@@ -467,4 +429,7 @@ else
fi
# ---------------------------------------------------------------
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
+
echo "communityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Successfully finished."
\ No newline at end of file
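
Community detection adds two tuning settings in the same "name=value" style: a gamma (resolution) value and an approximate maximum k-cut, whose overrides are documented above (5 for artifacts, 20 for packages, 100 for types, default 2). A sketch for the package level; the projection name, node label and the gamma parameter name and value are illustrative assumptions, while the weight property and k-cut value appear in the scripts:

    PACKAGE_PROJECTION="dependencies_projection=package-community"                      # illustrative name
    PACKAGE_NODE="dependencies_projection_node=Package"                                 # illustrative label
    PACKAGE_WEIGHT="dependencies_projection_weight_property=weight25PercentInterfaces"
    PACKAGE_GAMMA="dependencies_leiden_gamma=1.00"                                      # parameter name and value assumed
    PACKAGE_KCUT="dependencies_maxkcut=20"                                              # default = 2

    if createUndirectedDependencyProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
        detectCommunities "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}" "${PACKAGE_GAMMA}" "${PACKAGE_KCUT}"
        writeLeidenModularity "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"
    fi
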
diff --git a/scripts/reports/ExternalDependenciesCsv.sh b/scripts/reports/ExternalDependenciesCsv.sh
index fe851e996..ed21676ba 100755
--- a/scripts/reports/ExternalDependenciesCsv.sh
+++ b/scripts/reports/ExternalDependenciesCsv.sh
@@ -3,7 +3,7 @@
# Executes "External_Dependencies" Cypher queries to get the "external-dependencies-csv" CSV reports.
# They list external library package usage, e.g. how often an external package is called.
-# Requires executeQueryFunctions.sh
+# Requires executeQueryFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
set -o errexit -o pipefail
@@ -37,10 +37,9 @@ mkdir -p "${FULL_REPORT_DIRECTORY}"
# Local Constants
EXTERNAL_DEPENDENCIES_CYPHER_DIR="${CYPHER_DIR}/External_Dependencies"
-if ! execute_cypher_expect_results "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/List_external_types_used.cypher"; then
- echo "Please execute 'prepareAnalysis.sh' with 'Label_external_types_and_annotations.cypher' first."
- exit 1
-fi
+# Check if there are already labels for external Java types and create them otherwise
+execute_cypher_queries_until_results "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/List_external_Java_types_used.cypher" \
+ "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/Label_external_types_and_annotations.cypher"
execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/External_package_usage_overall.cypher" > "${FULL_REPORT_DIRECTORY}/External_package_usage_overall.csv"
execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/External_package_usage_spread.cypher" > "${FULL_REPORT_DIRECTORY}/External_package_usage_spread.csv"
@@ -55,4 +54,7 @@ execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/External_package_usage_per_a
execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/External_package_usage_per_artifact_and_external_package.cypher" > "${FULL_REPORT_DIRECTORY}/External_package_usage_per_artifact_and_external_package.csv"
execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/External_second_level_package_usage_per_artifact_and_external_package.cypher" > "${FULL_REPORT_DIRECTORY}/External_second_level_package_usage_per_artifact_and_external_package.csv"
-execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/Maven_POMs_and_their_declared_dependencies.cypher" > "${FULL_REPORT_DIRECTORY}/Maven_POM_dependencies.csv"
\ No newline at end of file
+execute_cypher "${EXTERNAL_DEPENDENCIES_CYPHER_DIR}/Maven_POMs_and_their_declared_dependencies.cypher" > "${FULL_REPORT_DIRECTORY}/Maven_POM_dependencies.csv"
+
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
\ No newline at end of file
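
The helper execute_cypher_queries_until_results comes from executeQueryFunctions.sh and is not shown in this diff. Judging from its usages (a "List"/"Get" query first, a "Label"/"Set" query as fallback), it presumably runs the given Cypher files in order until one of them returns results. A hedged sketch of such a helper, assuming execute_cypher prints a CSV header line followed by data rows:

    # Hypothetical sketch: execute the given Cypher files one after another and
    # print the first result that contains at least one data row below the header.
    execute_cypher_queries_until_results() {
        local cypher_file_name query_result
        for cypher_file_name in "$@"; do
            query_result=$( execute_cypher "${cypher_file_name}" )
            # More than one line means there is a header plus at least one data row.
            if [ "$( echo "${query_result}" | wc -l )" -gt 1 ]; then
                echo "${query_result}"
                return 0
            fi
        done
        return 1
    }
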
diff --git a/scripts/reports/ExternalDependenciesJupyter.sh b/scripts/reports/ExternalDependenciesJupyter.sh
deleted file mode 100755
index f6141724a..000000000
--- a/scripts/reports/ExternalDependenciesJupyter.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-
-# Creates the "overview" report (ipynb, md, pdf) based on the Jupyter Notebook "Overview.ipynb".
-# It contains a basic overview on how many Classes, Interfaces, Enums and Annotations earch artifact contains,
-# how they relate to each other, distribution of Methods and their effective lines of code
-# and how the cyclomatic complexity is distributed across all Methods per artifact.
-
-# Requires executeJupyterNotebook.sh
-
-# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
-set -o errexit -o pipefail
-
-# Overrideable Constants (defaults also defined in sub scripts)
-REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
-
-## Get this "scripts/reports" directory if not already set
-# Even if $BASH_SOURCE is made for Bourne-like shells it is also supported by others and therefore here the preferred solution.
-# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
-# This way non-standard tools like readlink aren't needed.
-REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )}
-echo "OverviewJupyter: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
-
-# Get the "scripts" directory by taking the path of this script and going one directory up.
-SCRIPTS_DIR=${SCRIPTS_DIR:-"${REPORTS_SCRIPT_DIR}/.."} # Repository directory containing the shell scripts
-echo "OverviewJupyter: SCRIPTS_DIR=${SCRIPTS_DIR}"
-
-# Get the "jupyter" directory by taking the path of this script and going two directory up and then to "jupyter".
-JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
-echo "OverviewJupyter: JUPYTER_NOTEBOOK_DIRECTORY=$JUPYTER_NOTEBOOK_DIRECTORY"
-
-# Create report directory
-REPORT_NAME="external-dependencies"
-FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
-mkdir -p "${FULL_REPORT_DIRECTORY}"
-
-# Execute and convert the following Jupyter Notebook within the given reports directory
-(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/ExternalDependencies.ipynb")
\ No newline at end of file
diff --git a/scripts/reports/InternalDependenciesCsv.sh b/scripts/reports/InternalDependenciesCsv.sh
index dd1ec86ac..068bf03f0 100755
--- a/scripts/reports/InternalDependenciesCsv.sh
+++ b/scripts/reports/InternalDependenciesCsv.sh
@@ -4,7 +4,7 @@
# It contains lists of e.g. incoming and outgoing package dependencies,
# abstractness, instability and the distance to the so called "main sequence".
-# Requires executeQueryFunctions.sh
+# Requires executeQueryFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" = exit on errors within piped commands)
set -o errexit -o pipefail
@@ -48,4 +48,7 @@ execute_cypher "${CYPHER_DIR}/Candidates_for_Interface_Segregation.cypher" > "${
execute_cypher "${INTERNAL_DEPENDENCIES_CYPHER_DIR}/List_types_that_are_used_by_many_different_packages.cypher" > "${FULL_REPORT_DIRECTORY}/WidelyUsedTypes.csv"
execute_cypher "${INTERNAL_DEPENDENCIES_CYPHER_DIR}/How_many_packages_compared_to_all_existing_are_used_by_dependent_artifacts.cypher" > "${FULL_REPORT_DIRECTORY}/ArtifactPackageUsage.csv"
-execute_cypher "${INTERNAL_DEPENDENCIES_CYPHER_DIR}/How_many_classes_compared_to_all_existing_in_the_same_package_are_used_by_dependent_packages_across_different_artifacts.cypher" > "${FULL_REPORT_DIRECTORY}/ClassesPerPackageUsageAcrossArtifacts.csv"
\ No newline at end of file
+execute_cypher "${INTERNAL_DEPENDENCIES_CYPHER_DIR}/How_many_classes_compared_to_all_existing_in_the_same_package_are_used_by_dependent_packages_across_different_artifacts.cypher" > "${FULL_REPORT_DIRECTORY}/ClassesPerPackageUsageAcrossArtifacts.csv"
+
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
\ No newline at end of file
diff --git a/scripts/reports/InternalDependenciesJupyter.sh b/scripts/reports/InternalDependenciesJupyter.sh
deleted file mode 100755
index 4d3e4f194..000000000
--- a/scripts/reports/InternalDependenciesJupyter.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-
-# Creates the "internal-dependencies" report (ipynb, md, pdf) based on the Jupyter Notebook "InternalDependencies.ipynb".
-# It contains lists of e.g. cyclic dependencies, dependencies that are only used by a few packages,
-# classes that are used by many different packages and some more.
-
-# Requires executeJupyterNotebook.sh
-
-# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
-set -o errexit -o pipefail
-
-# Overrideable Constants (defaults also defined in sub scripts)
-REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
-
-## Get this "scripts/reports" directory if not already set
-# Even if $BASH_SOURCE is made for Bourne-like shells it is also supported by others and therefore here the preferred solution.
-# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
-# This way non-standard tools like readlink aren't needed.
-REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )}
-echo "InternalDependenciesJupyter: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
-
-# Get the "scripts" directory by taking the path of this script and going one directory up.
-SCRIPTS_DIR=${SCRIPTS_DIR:-"${REPORTS_SCRIPT_DIR}/.."} # Repository directory containing the shell scripts
-echo "InternalDependenciesJupyter: SCRIPTS_DIR=${SCRIPTS_DIR}"
-
-# Get the "jupyter" directory by taking the path of this script and going two directory up and then to "jupyter".
-JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
-echo "InternalDependenciesJupyter: JUPYTER_NOTEBOOK_DIRECTORY=$JUPYTER_NOTEBOOK_DIRECTORY"
-
-# Create report directory
-REPORT_NAME="internal-dependencies"
-FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
-mkdir -p "${FULL_REPORT_DIRECTORY}"
-
-# Execute and convert the Jupyter Notebook "InternalDependencies.ipynb" within the given reports directory
-(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/InternalDependencies.ipynb")
\ No newline at end of file
diff --git a/scripts/reports/JavaCsv.sh b/scripts/reports/JavaCsv.sh
index 3410f3ccf..ed29afb18 100755
--- a/scripts/reports/JavaCsv.sh
+++ b/scripts/reports/JavaCsv.sh
@@ -3,7 +3,7 @@
# Executes "Java" Cypher queries to get the "java-csv" CSV reports.
# It contains lists of e.g. reflection usage, annotated language elements and usage of deprecated elements.
-# Requires executeQueryFunctions.sh
+# Requires executeQueryFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" = exit on errors within piped commands)
set -o errexit -o pipefail
@@ -49,4 +49,7 @@ execute_cypher "${JAVA_CYPHER_DIR}/Annotated_code_elements_per_artifact.cypher"
execute_cypher "${JAVA_CYPHER_DIR}/JakartaEE_REST_Annotations.cypher" > "${FULL_REPORT_DIRECTORY}/JakartaEE_REST_Annotations.csv"
execute_cypher "${JAVA_CYPHER_DIR}/Spring_Web_Request_Annotations.cypher" > "${FULL_REPORT_DIRECTORY}/Spring_Web_Request_Annotations.csv"
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
+
echo "JavaCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Successfully finished"
\ No newline at end of file
diff --git a/scripts/reports/MethodMetricsJupyter.sh b/scripts/reports/MethodMetricsJupyter.sh
deleted file mode 100755
index 4eb8dd0e6..000000000
--- a/scripts/reports/MethodMetricsJupyter.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-
-# Creates the method metrics report (ipynb, md, pdf) based on the Jupyter Notebook "MethodMetrics.ipynb".
-# It contains effective line counts and cyclomatic complexity of methods per artifact and package
-# and their distribution.
-
-# Requires executeJupyterNotebook.sh
-
-# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
-set -o errexit -o pipefail
-
-# Overrideable Constants (defaults also defined in sub scripts)
-REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
-
-## Get this "scripts/reports" directory if not already set
-# Even if $BASH_SOURCE is made for Bourne-like shells it is also supported by others and therefore here the preferred solution.
-# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
-# This way non-standard tools like readlink aren't needed.
-REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )}
-echo "MethodMetricsJupyter: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
-
-# Get the "scripts" directory by taking the path of this script and going one directory up.
-SCRIPTS_DIR=${SCRIPTS_DIR:-"${REPORTS_SCRIPT_DIR}/.."} # Repository directory containing the shell scripts
-echo "MethodMetricsJupyter: SCRIPTS_DIR=${SCRIPTS_DIR}"
-
-# Get the "jupyter" directory by taking the path of this script and going two directory up and then to "jupyter".
-JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
-echo "MethodMetricsJupyter: JUPYTER_NOTEBOOK_DIRECTORY=$JUPYTER_NOTEBOOK_DIRECTORY"
-
-# Create report directory
-REPORT_NAME="method-metrics"
-FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
-mkdir -p "${FULL_REPORT_DIRECTORY}"
-
-# Execute and convert the following Jupyter Notebook within the given reports directory
-(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/MethodMetrics.ipynb")
\ No newline at end of file
diff --git a/scripts/reports/NodeEmbeddingsCsv.sh b/scripts/reports/NodeEmbeddingsCsv.sh
index 0fc3db290..14b9519b2 100755
--- a/scripts/reports/NodeEmbeddingsCsv.sh
+++ b/scripts/reports/NodeEmbeddingsCsv.sh
@@ -6,7 +6,7 @@
# Note that "scripts/prepareAnalysis.sh" is required to run prior to this script.
-# Requires executeQueryFunctions.sh, parseCsvFunctions.sh
+# Requires executeQueryFunctions.sh, projectionFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" = exit on errors within piped commands)
set -o errexit -o pipefail
@@ -32,86 +32,14 @@ echo "nodeEmbeddingsCsv: CYPHER_DIR=${CYPHER_DIR}"
# Define functions to execute a cypher query from within the given file (first and only argument)
source "${SCRIPTS_DIR}/executeQueryFunctions.sh"
-# Define function(s) (e.g. is_csv_column_greater_zero) to parse CSV format strings from Cypher query results.
-source "${SCRIPTS_DIR}/parseCsvFunctions.sh"
+# Define functions to create and delete Graph Projections like "createDirectedDependencyProjection"
+source "${SCRIPTS_DIR}/projectionFunctions.sh"
# Create report directory
REPORT_NAME="node-embeddings-csv"
FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
mkdir -p "${FULL_REPORT_DIRECTORY}"
-# Node Embeddings Preparation: Create an undirected in-memory Graph.
-# Selects the nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-# - dependencies_projection_node=...
-# Label of the nodes that will be used for the projection. Example: "Package"
-# - dependencies_projection_weight_property=...
-# Name of the node property that contains the dependency weight. Example: "weight"
-createUndirectedProjection() {
- local PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_4_Create_Undirected_Projection.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
-# Node Embeddings Preparation: Create a directed in-memory Graph.
-# Selects the nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-# - dependencies_projection_node=...
-# Label of the nodes that will be used for the projection. Example: "Package"
-# - dependencies_projection_weight_property=...
-# Name of the node property that contains the dependency weight. Example: "weight"
-createDirectedProjection() {
- local PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3_Create_Projection.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}"
-}
-
-# Node Embeddings Preparation: Create a directed in-memory Graph for "Type" nodes.
-# Selects the Type nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-createDirectedTypeProjection() {
- local PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3c_Create_Type_Projection.cypher" "${@}"
-}
-
-# Node Embeddings Preparation: Create a undirected in-memory Graph for "Type" nodes.
-# Selects the Type nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-createUndirectedTypeProjection() {
- local PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_4c_Create_Undirected_Type_Projection.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
# Node Embeddings using Fast Random Projection
#
# Required Parameters:
@@ -225,11 +153,11 @@ ARTIFACT_DIMENSIONS="dependencies_projection_embedding_dimension=16"
# Artifact Node Embeddings
echo "nodeEmbeddingsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing artifact dependencies..."
-if createUndirectedProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
+if createUndirectedDependencyProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
time nodeEmbeddingsWithFastRandomProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}" "${ARTIFACT_DIMENSIONS}"
time nodeEmbeddingsWithHashGNN "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}" "${ARTIFACT_DIMENSIONS}"
- createDirectedProjection "${ARTIFACT_PROJECTION_DIRECTED}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"
+ createDirectedDependencyProjection "${ARTIFACT_PROJECTION_DIRECTED}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"
time nodeEmbeddingsWithNode2Vec "${ARTIFACT_PROJECTION_DIRECTED}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}" "${ARTIFACT_DIMENSIONS}"
else
echo "nodeEmbeddingsCsv: No data. Artifact analysis skipped."
@@ -246,11 +174,11 @@ PACKAGE_DIMENSIONS="dependencies_projection_embedding_dimension=32"
# Package Node Embeddings
echo "nodeEmbeddingsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing package dependencies..."
-if createUndirectedProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
+if createUndirectedDependencyProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
time nodeEmbeddingsWithFastRandomProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}" "${PACKAGE_DIMENSIONS}"
time nodeEmbeddingsWithHashGNN "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}" "${PACKAGE_DIMENSIONS}"
- createDirectedProjection "${PACKAGE_PROJECTION_DIRECTED}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"
+ createDirectedDependencyProjection "${PACKAGE_PROJECTION_DIRECTED}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"
time nodeEmbeddingsWithNode2Vec "${PACKAGE_PROJECTION_DIRECTED}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}" "${PACKAGE_DIMENSIONS}"
else
echo "nodeEmbeddingsCsv: No data. Package analysis skipped."
@@ -266,15 +194,18 @@ TYPE_DIMENSIONS="dependencies_projection_embedding_dimension=64"
# Type Node Embeddings
echo "nodeEmbeddingsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing type dependencies..."
-if createUndirectedTypeProjection "${TYPE_PROJECTION}"; then
+if createUndirectedJavaTypeDependencyProjection "${TYPE_PROJECTION}"; then
time nodeEmbeddingsWithFastRandomProjection "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}" "${TYPE_DIMENSIONS}"
time nodeEmbeddingsWithHashGNN "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}" "${TYPE_DIMENSIONS}"
- createDirectedTypeProjection "${TYPE_PROJECTION_DIRECTED}"
+ createDirectedJavaTypeDependencyProjection "${TYPE_PROJECTION_DIRECTED}"
time nodeEmbeddingsWithNode2Vec "${TYPE_PROJECTION_DIRECTED}" "${TYPE_NODE}" "${TYPE_WEIGHT}" "${TYPE_DIMENSIONS}"
else
echo "nodeEmbeddingsCsv: No data. Type analysis skipped."
fi
# ---------------------------------------------------------------
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
+
echo "nodeEmbeddingsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Successfully finished."
\ No newline at end of file
diff --git a/scripts/reports/NodeEmbeddingsJupyter.sh b/scripts/reports/NodeEmbeddingsJupyter.sh
deleted file mode 100755
index bdafa252a..000000000
--- a/scripts/reports/NodeEmbeddingsJupyter.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-
-# Creates the "node-embeddings" report (ipynb, md, pdf) based on the Jupyter Notebook "NodeEmbeddings.ipynb".
-# It shows how to create node embeddings for package dependencies using "Fast Random Projection" and
-# how these embeddings can be further reduced in their dimensionality down to two dimensions for visualization.
-# The plot also shows the community as color and the PageRank as size to have a visual feedback on how well they are clustered.
-
-# Requires executeJupyterNotebook.sh
-
-# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
-set -o errexit -o pipefail
-
-# Overrideable Constants (defaults also defined in sub scripts)
-REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
-
-## Get this "scripts/reports" directory if not already set
-# Even if $BASH_SOURCE is made for Bourne-like shells it is also supported by others and therefore here the preferred solution.
-# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
-# This way non-standard tools like readlink aren't needed.
-REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )}
-echo "NodeEmbeddingsJupyter: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
-
-# Get the "scripts" directory by taking the path of this script and going one directory up.
-SCRIPTS_DIR=${SCRIPTS_DIR:-"${REPORTS_SCRIPT_DIR}/.."} # Repository directory containing the shell scripts
-echo "NodeEmbeddingsJupyter: SCRIPTS_DIR=${SCRIPTS_DIR}"
-
-# Get the "jupyter" directory by taking the path of this script and going two directory up and then to "jupyter".
-JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
-echo "NodeEmbeddingsJupyter: JUPYTER_NOTEBOOK_DIRECTORY=$JUPYTER_NOTEBOOK_DIRECTORY"
-
-# Create report directory
-REPORT_NAME="node-embeddings"
-FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
-mkdir -p "${FULL_REPORT_DIRECTORY}"
-
-# Execute and convert the Jupyter Notebook "InternalDependencies.ipynb" within the given reports directory
-(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/NodeEmbeddings.ipynb")
\ No newline at end of file
diff --git a/scripts/reports/ObjectOrientedDesignMetricsCsv.sh b/scripts/reports/ObjectOrientedDesignMetricsCsv.sh
index 2c2a3b553..c07029f68 100755
--- a/scripts/reports/ObjectOrientedDesignMetricsCsv.sh
+++ b/scripts/reports/ObjectOrientedDesignMetricsCsv.sh
@@ -4,7 +4,7 @@
# It contains lists of e.g. incoming and outgoing package dependencies,
# abstractness, instability and the distance to the so called "main sequence".
-# Requires executeQueryFunctions.sh
+# Requires executeQueryFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" = exit on errors within piped commands)
set -o errexit -o pipefail
@@ -38,27 +38,48 @@ mkdir -p "${FULL_REPORT_DIRECTORY}"
# Local Constants
METRICS_CYPHER_DIR="${CYPHER_DIR}/Metrics"
-echo "ObjectOrientedDesignMetricsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing packages without sub-packages..."
+# Java Packages only without sub-packages
+echo "ObjectOrientedDesignMetricsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing Java packages without sub-packages..."
+execute_cypher_queries_until_results "${METRICS_CYPHER_DIR}/Get_Incoming_Java_Package_Dependencies.cypher" \
+ "${METRICS_CYPHER_DIR}/Set_Incoming_Java_Package_Dependencies.cypher" \
+ > "${FULL_REPORT_DIRECTORY}/IncomingPackageDependenciesJava.csv"
+execute_cypher_queries_until_results "${METRICS_CYPHER_DIR}/Get_Outgoing_Java_Package_Dependencies.cypher" \
+ "${METRICS_CYPHER_DIR}/Set_Outgoing_Java_Package_Dependencies.cypher" \
+ > "${FULL_REPORT_DIRECTORY}/OutgoingPackageDependenciesJava.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Instability_for_Java.cypher" > "${FULL_REPORT_DIRECTORY}/InstabilityJava.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Abstractness_for_Java.cypher" > "${FULL_REPORT_DIRECTORY}/AbstractnessJava.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_distance_between_abstractness_and_instability_for_Java.cypher" > "${FULL_REPORT_DIRECTORY}/MainSequenceAbstractnessInstabilityDistanceJava.csv"
-# Packages only without sub-packages
-execute_cypher "${METRICS_CYPHER_DIR}/Set_Incoming_Package_Dependencies.cypher" > "${FULL_REPORT_DIRECTORY}/IncomingPackageDependencies.csv"
-execute_cypher "${METRICS_CYPHER_DIR}/Set_Outgoing_Package_Dependencies.cypher" > "${FULL_REPORT_DIRECTORY}/OutgoingPackageDependencies.csv"
-execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Instability_outgoing_incoming_Dependencies.cypher" > "${FULL_REPORT_DIRECTORY}/Instability.csv"
-execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Abstractness_including_Counts.cypher" > "${FULL_REPORT_DIRECTORY}/Abstractness.csv"
-execute_cypher "${METRICS_CYPHER_DIR}/Calculate_distance_between_abstractness_and_instability.cypher" > "${FULL_REPORT_DIRECTORY}/MainSequenceAbstractnessInstabilityDistance.csv"
-
-
-# Packages including sub-packages (overlapping/redundant)
+# Java Packages including sub-packages (overlapping/redundant)
# Since Java Packages are organized hierarchically,
# incoming dependencies can also be calculated by including all of their sub-packages.
# Top-level packages like "org" and "org.company" are left out
# by ensuring that only packages that have other packages or types
# at the same hierarchy level ("siblings") are considered.
echo "ObjectOrientedDesignMetricsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing packages including sub-packages..."
-execute_cypher "${METRICS_CYPHER_DIR}/Set_Incoming_Package_Dependencies_Including_Subpackages.cypher" > "${FULL_REPORT_DIRECTORY}/IncomingPackageDependenciesIncludingSubpackages.csv"
-execute_cypher "${METRICS_CYPHER_DIR}/Set_Outgoing_Package_Dependencies_Including_Subpackages.cypher" > "${FULL_REPORT_DIRECTORY}/OutgoingPackageDependenciesIncludingSubpackages.csv"
-execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Instability_Including_Subpackages.cypher" > "${FULL_REPORT_DIRECTORY}/InstabilityIncludingSubpackages.csv"
-execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Abstractness_including_Subpackages.cypher" > "${FULL_REPORT_DIRECTORY}/AbstractnessIncludingSubpackages.csv"
-execute_cypher "${METRICS_CYPHER_DIR}/Calculate_distance_between_abstractness_and_instability_including_subpackages.cypher" > "${FULL_REPORT_DIRECTORY}/MainSequenceAbstractnessInstabilityDistanceIncludingSubpackages.csv"
+execute_cypher_queries_until_results "${METRICS_CYPHER_DIR}/Get_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher" \
+ "${METRICS_CYPHER_DIR}/Set_Incoming_Java_Package_Dependencies_Including_Subpackages.cypher" \
+ > "${FULL_REPORT_DIRECTORY}/IncomingPackageDependenciesIncludingSubpackagesJava.csv"
+execute_cypher_queries_until_results "${METRICS_CYPHER_DIR}/Get_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher" \
+ "${METRICS_CYPHER_DIR}/Set_Outgoing_Java_Package_Dependencies_Including_Subpackages.cypher" \
+ > "${FULL_REPORT_DIRECTORY}/OutgoingPackageDependenciesIncludingSubpackagesJava.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Instability_for_Java_Including_Subpackages.cypher" > "${FULL_REPORT_DIRECTORY}/InstabilityIncludingSubpackagesJava.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Abstractness_for_Java_including_Subpackages.cypher" > "${FULL_REPORT_DIRECTORY}/AbstractnessIncludingSubpackagesJava.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_distance_between_abstractness_and_instability_for_Java_including_subpackages.cypher" > "${FULL_REPORT_DIRECTORY}/MainSequenceAbstractnessInstabilityDistanceIncludingSubpackagesJava.csv"
+
+# Typescript Modules
+echo "ObjectOrientedDesignMetricsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing Typescript modules..."
+execute_cypher_queries_until_results "${METRICS_CYPHER_DIR}/Get_Incoming_Typescript_Module_Dependencies.cypher" \
+ "${METRICS_CYPHER_DIR}/Set_Incoming_Typescript_Module_Dependencies.cypher" \
+ > "${FULL_REPORT_DIRECTORY}/IncomingPackageDependenciesTypescript.csv"
+execute_cypher_queries_until_results "${METRICS_CYPHER_DIR}/Get_Outgoing_Typescript_Module_Dependencies.cypher" \
+ "${METRICS_CYPHER_DIR}/Set_Outgoing_Typescript_Module_Dependencies.cypher" \
+ > "${FULL_REPORT_DIRECTORY}/OutgoingPackageDependenciesTypescript.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Instability_for_Typescript.cypher" > "${FULL_REPORT_DIRECTORY}/InstabilityTypescript.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_and_set_Abstractness_for_Typescript.cypher" > "${FULL_REPORT_DIRECTORY}/AbstractnessTypescript.csv"
+execute_cypher "${METRICS_CYPHER_DIR}/Calculate_distance_between_abstractness_and_instability_for_Typescript.cypher" > "${FULL_REPORT_DIRECTORY}/MainSequenceAbstractnessInstabilityDistanceTypescript.csv"
+
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
echo "ObjectOrientedDesignMetricsCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Successfully finished."
\ No newline at end of file
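The Get/Set query pairs above rely on the helper execute_cypher_queries_until_results, whose implementation is not part of this diff (it presumably lives in executeQueryFunctions.sh alongside execute_cypher). The report names refer to Robert C. Martin's package design metrics: instability I = Ce / (Ca + Ce), abstractness A = abstract types / all types, and the distance |A + I - 1| from the "main sequence". A minimal sketch of the assumed helper behavior, running the given Cypher files in order and returning the first result that contains data rows:

# Hypothetical sketch (not part of this patch): run the given Cypher files in order
# and stop as soon as one of them returns data rows, so that a cheap "Get" query can
# short-circuit the more expensive "Set" query that computes and stores the values.
execute_cypher_queries_until_results() {
    local cypher_query_file query_result
    for cypher_query_file in "$@"; do
        query_result=$( execute_cypher "${cypher_query_file}" )
        # More than one line means there is at least one data row below the CSV header.
        if [ "$( printf '%s\n' "${query_result}" | wc -l )" -gt 1 ]; then
            echo "${query_result}"
            return 0
        fi
    done
    echo "${query_result}" # Fall back to the (empty) result of the last query
}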
diff --git a/scripts/reports/ObjectOrientedDesignMetricsJupyter.sh b/scripts/reports/ObjectOrientedDesignMetricsJupyter.sh
deleted file mode 100755
index 8e154504a..000000000
--- a/scripts/reports/ObjectOrientedDesignMetricsJupyter.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-
-# Creates the "object-oriented-design-metrics" report (ipynb, md, pdf) based on the Jupyter Notebook "ObjectOrientedDesignMetrics.ipynb".
-# It contains lists of e.g. incoming and outgoing package dependencies,
-# abstractness, instability and the distance to the so called "main sequence".
-
-# Requires executeJupyterNotebook.sh
-
-# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
-set -o errexit -o pipefail
-
-# Overrideable Constants (defaults also defined in sub scripts)
-REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
-
-## Get this "scripts/reports" directory if not already set
-# Even if $BASH_SOURCE is made for Bourne-like shells it is also supported by others and therefore here the preferred solution.
-# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
-# This way non-standard tools like readlink aren't needed.
-REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )}
-echo "ObjectOrientedDesignMetricsJupyter: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
-
-# Get the "scripts" directory by taking the path of this script and going one directory up.
-SCRIPTS_DIR=${SCRIPTS_DIR:-"${REPORTS_SCRIPT_DIR}/.."} # Repository directory containing the shell scripts
-echo "ObjectOrientedDesignMetricsJupyter: SCRIPTS_DIR=${SCRIPTS_DIR}"
-
-# Get the "jupyter" directory by taking the path of this script and going two directory up and then to "jupyter".
-JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
-echo "ObjectOrientedDesignMetricsJupyter: JUPYTER_NOTEBOOK_DIRECTORY=$JUPYTER_NOTEBOOK_DIRECTORY"
-
-# Create report directory
-REPORT_NAME="object-oriented-design-metrics"
-FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
-mkdir -p "${FULL_REPORT_DIRECTORY}"
-
-# Execute and convert the Jupyter Notebook "ObjectOrientedDesignMetrics.ipynb" within the given reports directory
-(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/ObjectOrientedDesignMetrics.ipynb")
\ No newline at end of file
diff --git a/scripts/reports/OverviewCsv.sh b/scripts/reports/OverviewCsv.sh
index 55e5c9db4..9962c6109 100755
--- a/scripts/reports/OverviewCsv.sh
+++ b/scripts/reports/OverviewCsv.sh
@@ -3,7 +3,7 @@
# Executes "Overview" Cypher queries to get the "overview-csv" CSV reports.
# It contains the numbers of packages, types, methods, cyclomatic complexity, etc.
-# Requires executeQueryFunctions.sh
+# Requires executeQueryFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" exits on errors within piped commands)
set -o errexit -o pipefail
@@ -43,4 +43,7 @@ execute_cypher "${OVERVIEW_CYPHER_DIR}/Effective_lines_of_method_code_per_packag
execute_cypher "${OVERVIEW_CYPHER_DIR}/Effective_lines_of_method_code_per_type.cypher" > "${FULL_REPORT_DIRECTORY}/Effective_lines_of_method_code_per_type.csv"
execute_cypher "${OVERVIEW_CYPHER_DIR}/Effective_Method_Line_Count_Distribution.cypher" > "${FULL_REPORT_DIRECTORY}/Effective_Method_Line_Count.csv"
execute_cypher "${OVERVIEW_CYPHER_DIR}/Number_of_packages_per_artifact.cypher" > "${FULL_REPORT_DIRECTORY}/Number_of_packages_per_artifact.csv"
-execute_cypher "${OVERVIEW_CYPHER_DIR}/Number_of_types_per_artifact.cypher" > "${FULL_REPORT_DIRECTORY}/Number_of_types_per_artifact.csv"
\ No newline at end of file
+execute_cypher "${OVERVIEW_CYPHER_DIR}/Number_of_types_per_artifact.cypher" > "${FULL_REPORT_DIRECTORY}/Number_of_types_per_artifact.csv"
+
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
\ No newline at end of file
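Several of these report scripts now end by sourcing cleanupAfterReportGeneration.sh, whose implementation is not shown in this diff. A rough sketch, assuming it simply removes CSV reports without data rows from the report directory passed as the first argument:

#!/usr/bin/env bash
# Hypothetical sketch (not part of this patch) of cleanupAfterReportGeneration.sh:
# delete CSV reports in the given directory that are empty or contain only a header line.
reportDirectory="${1}"
for report_file in "${reportDirectory}"/*.csv; do
    [ -e "${report_file}" ] || continue # Skip if the glob matched nothing
    if [ "$( wc -l < "${report_file}" )" -le 1 ]; then
        echo "cleanupAfterReportGeneration: Removing empty report ${report_file}"
        rm -f "${report_file}"
    fi
done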
diff --git a/scripts/reports/SimilarityCsv.sh b/scripts/reports/SimilarityCsv.sh
index a4f845a74..fc420125b 100755
--- a/scripts/reports/SimilarityCsv.sh
+++ b/scripts/reports/SimilarityCsv.sh
@@ -5,7 +5,7 @@
# The reports (csv files) will be written into the sub directory reports/similarity-csv.
# Note that "scripts/prepareAnalysis.sh" is required to run prior to this script.
-# Requires executeQueryFunctions.sh, parseCsvFunctions.sh
+# Requires executeQueryFunctions.sh, projectionFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" exits on errors within piped commands)
set -o errexit -o pipefail
@@ -31,52 +31,14 @@ echo "similarityCsv: CYPHER_DIR=$CYPHER_DIR"
# Define functions to execute a cypher query from within the given file (first and only argument)
source "${SCRIPTS_DIR}/executeQueryFunctions.sh"
-# Define function(s) (e.g. is_csv_column_greater_zero) to parse CSV format strings from Cypher query results.
-source "${SCRIPTS_DIR}/parseCsvFunctions.sh"
+# Define functions to create and delete Graph Projections like "createDirectedDependencyProjection"
+source "${SCRIPTS_DIR}/projectionFunctions.sh"
# Create report directory
REPORT_NAME="similarity-csv"
FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
mkdir -p "${FULL_REPORT_DIRECTORY}"
-# Similarity Preparation
-# Selects the nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-# - dependencies_projection_node=...
-# Label of the nodes that will be used for the projection. Example: "Package"
-# - dependencies_projection_weight_property=...
-# Name of the node property that contains the dependency weight. Example: "weight"
-createProjection() {
- local PROJECTION_CYPHER_DIR="$CYPHER_DIR/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3_Create_Projection.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
-# Similarity Preparation for Types
-# Selects the Type nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-createTypeProjection() {
- local PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3c_Create_Type_Projection.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
# Apply the similarity algorithm "Similarity".
#
# Required Parameters:
@@ -118,7 +80,7 @@ ARTIFACT_WEIGHT="dependencies_projection_weight_property=weight"
# Artifact Similarity
echo "similarityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing artifact dependencies..."
-if createProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
+if createDirectedDependencyProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
time similarity "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"
else
echo "similarityCsv: No data. Artifact analysis skipped."
@@ -132,7 +94,7 @@ PACKAGE_WEIGHT="dependencies_projection_weight_property=weight25PercentInterface
# Package Similarity
echo "similarityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing package dependencies..."
-if createProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
+if createDirectedDependencyProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
time similarity "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"
else
echo "similarityCsv: No data. Package analysis skipped."
@@ -146,11 +108,14 @@ TYPE_WEIGHT="dependencies_projection_weight_property=weight"
# Type Similarity
echo "similarityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing type dependencies..."
-if createTypeProjection "${TYPE_PROJECTION}"; then
+if createDirectedJavaTypeDependencyProjection "${TYPE_PROJECTION}"; then
time similarity "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}"
else
echo "similarityCsv: No data. Type analysis skipped."
fi
# ---------------------------------------------------------------
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
+
echo "similarityCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Successfully finished."
\ No newline at end of file
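The createProjection and createTypeProjection functions deleted here (and from TopologicalSortCsv.sh below) move into the shared projectionFunctions.sh as createDirectedDependencyProjection and createDirectedJavaTypeDependencyProjection. That file is not shown in this diff; reconstructed from the removed code, the shared variant presumably looks roughly like this:

# Hypothetical sketch (not part of this patch), reconstructed from the deleted createProjection:
# (re)create the in-memory dependency projection and use the exit status to signal
# whether the filtered subgraph contains any relationships at all.
# Note: is_csv_column_greater_zero from parseCsvFunctions.sh is assumed to be sourced
# by projectionFunctions.sh itself, since the report scripts no longer source it directly.
createDirectedDependencyProjection() {
    local PROJECTION_CYPHER_DIR="${CYPHER_DIR}/Dependencies_Projection"
    local projectionResult

    execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
    execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
    execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3_Create_Projection.cypher" "${@}"
    projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}" )
    is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
}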
diff --git a/scripts/reports/TopologicalSortCsv.sh b/scripts/reports/TopologicalSortCsv.sh
index dc1544b8f..c91751e89 100755
--- a/scripts/reports/TopologicalSortCsv.sh
+++ b/scripts/reports/TopologicalSortCsv.sh
@@ -7,7 +7,7 @@
# The reports (csv files) will be written into the sub directory reports/topology-csv.
# Note that "scripts/prepareAnalysis.sh" is required to run prior to this script.
-# Requires executeQueryFunctions.sh, parseCsvFunctions.sh
+# Requires executeQueryFunctions.sh, projectionFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" exits on errors within piped commands)
set -o errexit -o pipefail
@@ -33,36 +33,14 @@ echo "topologicalSortCsv: CYPHER_DIR=$CYPHER_DIR"
# Define functions to execute a cypher query from within the given file (first and only argument)
source "${SCRIPTS_DIR}/executeQueryFunctions.sh"
-# Define function(s) (e.g. is_csv_column_greater_zero) to parse CSV format strings from Cypher query results.
-source "${SCRIPTS_DIR}/parseCsvFunctions.sh"
+# Define functions to create and delete Graph Projections like "createDirectedDependencyProjection"
+source "${SCRIPTS_DIR}/projectionFunctions.sh"
# Create report directory
REPORT_NAME="topology-csv"
FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
mkdir -p "${FULL_REPORT_DIRECTORY}"
-# Topological Sort Preparation
-# Selects the nodes and relationships for the algorithm and creates an in-memory projection.
-# Nodes without incoming and outgoing dependencies (zero degree) will be filtered out with a subgraph.
-#
-# Required Parameters:
-# - dependencies_projection=...
-# Name prefix for the in-memory projection name for dependencies. Example: "package"
-# - dependencies_projection_node=...
-# Label of the nodes that will be used for the projection. Example: "Package"
-# - dependencies_projection_weight_property=...
-# Name of the node property that contains the dependency weight. Example: "weight"
-createProjection() {
- local PROJECTION_CYPHER_DIR="$CYPHER_DIR/Dependencies_Projection"
- local projectionResult
-
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_1_Delete_Projection.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_2_Delete_Subgraph.cypher" "${@}"
- execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_3_Create_Projection.cypher" "${@}"
- projectionResult=$( execute_cypher "${PROJECTION_CYPHER_DIR}/Dependencies_5_Create_Subgraph.cypher" "${@}")
- is_csv_column_greater_zero "${projectionResult}" "relationshipCount"
-}
-
# Apply the algorithm "Topological Sort".
#
# Required Parameters:
@@ -93,7 +71,7 @@ ARTIFACT_WEIGHT="dependencies_projection_weight_property=weight"
# Artifact Topology
echo "topologicalSortCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing artifact dependencies..."
-if createProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
+if createDirectedDependencyProjection "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"; then
time topologicalSort "${ARTIFACT_PROJECTION}" "${ARTIFACT_NODE}" "${ARTIFACT_WEIGHT}"
else
echo "topologicalSortCsv: No data. Artifact analysis skipped."
@@ -107,7 +85,7 @@ PACKAGE_WEIGHT="dependencies_projection_weight_property=weight25PercentInterface
# Package Topology
echo "topologicalSortCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing package dependencies..."
-if createProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
+if createDirectedDependencyProjection "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"; then
time topologicalSort "${PACKAGE_PROJECTION}" "${PACKAGE_NODE}" "${PACKAGE_WEIGHT}"
else
echo "topologicalSortCsv: No data. Package analysis skipped."
@@ -121,11 +99,14 @@ TYPE_WEIGHT="dependencies_projection_weight_property=weight"
# Type Topology
echo "topologicalSortCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Processing type dependencies..."
-if createProjection "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}"; then
+if createDirectedJavaTypeDependencyProjection "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}"; then
time topologicalSort "${TYPE_PROJECTION}" "${TYPE_NODE}" "${TYPE_WEIGHT}"
else
echo "topologicalSortCsv: No data. Type analysis skipped."
fi
# ---------------------------------------------------------------
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
+
echo "topologicalSortCsv: $(date +'%Y-%m-%dT%H:%M:%S%z') Successfully finished."
\ No newline at end of file
diff --git a/scripts/reports/VisibilityMetricsCsv.sh b/scripts/reports/VisibilityMetricsCsv.sh
index 045e15a07..252f89e43 100755
--- a/scripts/reports/VisibilityMetricsCsv.sh
+++ b/scripts/reports/VisibilityMetricsCsv.sh
@@ -4,7 +4,7 @@
# It contains lists of packages with their relative visibility (public types divided by all types)
# as well as the global statistics for every artifact.
-# Requires executeQueryFunctions.sh
+# Requires executeQueryFunctions.sh, cleanupAfterReportGeneration.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" exits on errors within piped commands)
set -o errexit -o pipefail
@@ -39,4 +39,7 @@ mkdir -p "${FULL_REPORT_DIRECTORY}"
VISIBILITY_CYPHER_DIR="${CYPHER_DIR}/Visibility"
execute_cypher "${VISIBILITY_CYPHER_DIR}/Global_relative_visibility_statistics_for_types.cypher" > "${FULL_REPORT_DIRECTORY}/RelativeVisibilityPerArtifact.csv"
-execute_cypher "${VISIBILITY_CYPHER_DIR}/Relative_visibility_public_types_to_all_types_per_package.cypher" > "${FULL_REPORT_DIRECTORY}/RelativeVisibilityPerPackage.csv"
\ No newline at end of file
+execute_cypher "${VISIBILITY_CYPHER_DIR}/Relative_visibility_public_types_to_all_types_per_package.cypher" > "${FULL_REPORT_DIRECTORY}/RelativeVisibilityPerPackage.csv"
+
+# Clean-up after report generation. Empty reports will be deleted.
+source "${SCRIPTS_DIR}/cleanupAfterReportGeneration.sh" "${FULL_REPORT_DIRECTORY}"
\ No newline at end of file
diff --git a/scripts/reports/VisibilityMetricsJupyter.sh b/scripts/reports/VisibilityMetricsJupyter.sh
deleted file mode 100755
index 7d1510399..000000000
--- a/scripts/reports/VisibilityMetricsJupyter.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-# Creates the "visibility-metrics" report (ipynb, md, pdf) based on the Jupyter Notebook "VisibilityMetrics.ipynb".
-# It contains lists of how many components are visible everywhere in comparison to all (including internal) components.
-
-# Requires executeJupyterNotebook.sh
-
-# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
-set -o errexit -o pipefail
-
-# Overrideable Constants (defaults also defined in sub scripts)
-REPORTS_DIRECTORY=${REPORTS_DIRECTORY:-"reports"}
-
-## Get this "scripts/reports" directory if not already set
-# Even if $BASH_SOURCE is made for Bourne-like shells it is also supported by others and therefore here the preferred solution.
-# CDPATH reduces the scope of the cd command to potentially prevent unintended directory changes.
-# This way non-standard tools like readlink aren't needed.
-REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-$( CDPATH=. cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P )}
-echo "VisibilityMetricsJupyter: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
-
-# Get the "scripts" directory by taking the path of this script and going one directory up.
-SCRIPTS_DIR=${SCRIPTS_DIR:-"${REPORTS_SCRIPT_DIR}/.."} # Repository directory containing the shell scripts
-echo "VisibilityMetricsJupyter: SCRIPTS_DIR=${SCRIPTS_DIR}"
-
-# Get the "jupyter" directory by taking the path of this script and going two directory up and then to "jupyter".
-JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
-echo "VisibilityMetricsJupyter: JUPYTER_NOTEBOOK_DIRECTORY=$JUPYTER_NOTEBOOK_DIRECTORY"
-
-# Create report directory
-REPORT_NAME="visibility-metrics"
-FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY}/${REPORT_NAME}"
-mkdir -p "${FULL_REPORT_DIRECTORY}"
-
-# Execute and convert the Jupyter Notebook "VisibilityMetrics.ipynb" within the given reports directory
-(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/VisibilityMetrics.ipynb")
\ No newline at end of file
diff --git a/scripts/reports/compilations/JupyterReports.sh b/scripts/reports/compilations/JupyterReports.sh
index a30a2c35b..425bf0435 100755
--- a/scripts/reports/compilations/JupyterReports.sh
+++ b/scripts/reports/compilations/JupyterReports.sh
@@ -1,12 +1,12 @@
#!/usr/bin/env bash
# Runs all Jupyter Notebook report scripts.
-# It only consideres scripts in the "reports" directory (overridable with REPORTS_SCRIPT_DIR) one directory above this one.
-# These require phython, conda (e.g. miniconda) as well as several packages.
+# It only considers scripts in the "reports" directory (overridable with REPORTS_SCRIPT_DIR) one directory above this one.
+# These require Python, Conda (e.g. Miniconda) as well as several packages.
# For PDF generation chromium is required additionally.
# Therefore these reports will take longer and require more resources than just plain database queries/procedures.
-# Requires reports/*.sh
+# Requires executeJupyterNotebookReports.sh, jupyter/*.ipynb
# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
set -o errexit -o pipefail
@@ -21,8 +21,17 @@ echo "JupyterReports: REPORT_COMPILATIONS_SCRIPT_DIR=${REPORT_COMPILATIONS_SCRIP
REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR:-$(dirname -- "${REPORT_COMPILATIONS_SCRIPT_DIR}")}
echo "JupyterReports: REPORTS_SCRIPT_DIR=${REPORTS_SCRIPT_DIR}"
+# Get the "scripts" directory by taking the scripts report path and going one directory up.
+SCRIPTS_DIR=${SCRIPTS_DIR:-$(dirname -- "${REPORTS_SCRIPT_DIR}")}
+echo "JupyterReports: SCRIPTS_DIR=${SCRIPTS_DIR}"
+
+# Get the "jupyter" directory by taking the path of the scripts directory, going up one directory and change then into "jupyter".
+JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY:-"${SCRIPTS_DIR}/../jupyter"} # Repository directory containing the Jupyter Notebooks
+echo "JupyterReports: JUPYTER_NOTEBOOK_DIRECTORY=${JUPYTER_NOTEBOOK_DIRECTORY}"
+
# Run all report scripts
-for report_script_file in "${REPORTS_SCRIPT_DIR}"/*Jupyter.sh; do
- echo "JupyterReports: Starting ${report_script_file}...";
- source "${report_script_file}"
+for jupyter_notebook_file in "${JUPYTER_NOTEBOOK_DIRECTORY}"/*.ipynb; do
+ jupyter_notebook_file=$( basename "${jupyter_notebook_file}")
+ echo "JupyterReports: Executing ${jupyter_notebook_file}...";
+ source "${SCRIPTS_DIR}/executeJupyterNotebookReports.sh" --jupyterNotebook "${jupyter_notebook_file}"
done
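The per-notebook scripts deleted above (e.g. ObjectOrientedDesignMetricsJupyter.sh and VisibilityMetricsJupyter.sh) are replaced by a single parameterized executeJupyterNotebookReports.sh, which is not shown in this diff. A sketch of its assumed core, derived from the deleted scripts (details such as the kebab-case report directory names are simplified here):

# Hypothetical sketch (not part of this patch) of the core of executeJupyterNotebookReports.sh.
# It accepts "--jupyterNotebook <file name>" and executes that notebook inside a report
# directory derived from the notebook name, replacing the deleted per-notebook scripts.
if [ "${1}" = "--jupyterNotebook" ]; then
    jupyterNotebookFileName="${2}"
fi

REPORT_NAME=$( basename "${jupyterNotebookFileName}" .ipynb )
FULL_REPORT_DIRECTORY="${REPORTS_DIRECTORY:-reports}/${REPORT_NAME}"
mkdir -p "${FULL_REPORT_DIRECTORY}"

# Execute and convert the Jupyter Notebook within the report directory (as the deleted scripts did)
(cd "${FULL_REPORT_DIRECTORY}" && exec "${SCRIPTS_DIR}/executeJupyterNotebook.sh" "${JUPYTER_NOTEBOOK_DIRECTORY}/${jupyterNotebookFileName}")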
diff --git a/scripts/resetAndScan.sh b/scripts/resetAndScan.sh
index 016b649fa..0518a6ce4 100755
--- a/scripts/resetAndScan.sh
+++ b/scripts/resetAndScan.sh
@@ -5,6 +5,7 @@
# CAUTION: This script deletes all relationships and nodes in the Neo4j Graph Database.
# Note: The environment variable NEO4J_INITIAL_PASSWORD is required to login to Neo4j.
+# Requires findTypescriptDataFiles.sh
# Fail on any error ("-e" = exit on first error, "-o pipefail" exist on errors within piped commands)
set -o errexit -o pipefail
@@ -65,10 +66,12 @@ else
echo "resetAndScan: jQAssistant configuration won't be changed since it already exists."
fi
+directoriesAndFilesToScan="${ARTIFACTS_DIRECTORY} $(source ${SCRIPTS_DIR}/findTypescriptDataFiles.sh)"
+
# Use jQAssistant to scan the downloaded artifacts and write the results into the separate, local Neo4j Graph Database
-echo "resetAndScan: Scanning ${ARTIFACTS_DIRECTORY} with jQAssistant CLI version ${JQASSISTANT_CLI_VERSION}"
+echo "resetAndScan: Using jQAssistant CLI version ${JQASSISTANT_CLI_VERSION} to scan: ${directoriesAndFilesToScan}"
-"${JQASSISTANT_BIN}"/jqassistant.sh scan -f ./${ARTIFACTS_DIRECTORY}
+"${JQASSISTANT_BIN}"/jqassistant.sh scan -f ./${directoriesAndFilesToScan}
# Use jQAssistant to add dependencies between artifacts, package dependencies, artifact dependencies and the java version to the Neo4j Graph Database
echo "resetAndScan: Analyzing ${ARTIFACTS_DIRECTORY} with jQAssistant CLI version ${JQASSISTANT_CLI_VERSION}"