Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
112 changes: 112 additions & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
# Common settings that generally should always be used with your language specific settings

# Auto detect text files and perform LF normalization
# https://www.davidlaing.com/2012/09/19/customise-your-gitattributes-to-become-a-git-ninja/
* text=auto

#
# The above will handle all files NOT found below
#

# Documents
*.bibtex text diff=bibtex
*.doc diff=astextplain
*.DOC diff=astextplain
*.docx diff=astextplain
*.DOCX diff=astextplain
*.dot diff=astextplain
*.DOT diff=astextplain
*.pdf diff=astextplain
*.PDF diff=astextplain
*.rtf diff=astextplain
*.RTF diff=astextplain
*.md text
*.tex text diff=tex
*.adoc text
*.textile text
*.mustache text
*.csv text
*.tab text
*.tsv text
*.txt text
*.sql text

# Graphics
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.tif binary
*.tiff binary
*.ico binary
# SVG treated as text by default.
*.svg text
# If you want to treat it as binary,
# use the following line instead.
# *.svg binary
*.eps binary

# Scripts
*.bash text eol=lf
*.fish text eol=lf
*.sh text eol=lf
# These are explicitly windows files and should use crlf
*.bat text eol=crlf
*.cmd text eol=crlf
*.ps1 text eol=crlf

# Serialisation
*.json text
*.toml text
*.xml text
*.yaml text
*.yml text

# Archives
*.7z binary
*.gz binary
*.tar binary
*.tgz binary
*.zip binary

# Text files where line endings should be preserved
*.patch -text

#
# Exclude files from exporting
#

.gitattributes export-ignore
.gitignore export-ignore

# Java sources
*.java text diff=java
*.gradle text diff=java
*.gradle.kts text diff=java

# These files are text and should be normalized (Convert crlf => lf)
*.css text diff=css
*.df text
*.htm text diff=html
*.html text diff=html
*.js text
*.jsp text
*.jspf text
*.jspx text
*.properties text
*.tld text
*.tag text
*.tagx text
# NOTE: *.xml is already declared once under "Serialisation" above;
# the former duplicate entry here was removed (last match wins in gitattributes,
# and duplicates with identical attributes are just noise).

# These generated analysis-results shouldn't be taken into account for language statistics
analysis-results/** linguist-vendored

# These files are binary and should be left untouched
# (binary is a macro for -text -diff)
*.class binary
*.dll binary
*.ear binary
*.jar binary
*.so binary
*.war binary
15 changes: 15 additions & 0 deletions .github/pull_request_template.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
### 🚀 Feature

-

### ⚙️ Optimization

-

### 🛠 Fix

-

### 📖 Documentation

-
181 changes: 181 additions & 0 deletions .github/workflows/analyze-code-graph.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
name: Analyze Code Graph

# Reusable workflow that runs the code-graph-analysis-pipeline against either
# uploaded build artifacts or uploaded sources and publishes the analysis
# results as a workflow artifact (name exposed via the 'uploaded-analysis-results' output).
on:
  workflow_call:
    inputs:
      analysis-name:
        description: "The name of the project to analyze. E.g. MyProject-1.0.0"
        required: true
        type: string
      artifacts-upload-name:
        description: "The name of the artifacts uploaded with 'actions/upload-artifact' containing the content of the 'artifacts' directory for the analysis."
        required: false
        type: string
        default: ''
      sources-upload-name:
        description: "The name of the sources uploaded with 'actions/upload-artifact' containing the content of the 'source' directory for the analysis."
        required: false
        type: string
        default: ''
      analysis-arguments:
        description: "The arguments to pass to the analysis script (default='--profile Neo4jv5-low-memory')."
        required: false
        type: string
        default: '--profile Neo4jv5-low-memory'
      typescript-scan-heap-memory:
        description: "The heap memory in MB to use for the TypeScript scan (default=4096)"
        required: false
        type: string
        default: '4096'
    outputs:
      uploaded-analysis-results:
        description: "The name of the artifact uploaded with 'actions/upload-artifact' containing the analysis results."
        value: ${{ jobs.analyze-code-graph.outputs.uploaded-analysis-results-artifact-name }}

jobs:
  analyze-code-graph:
    runs-on: ubuntu-latest
    outputs:
      uploaded-analysis-results-artifact-name: ${{ steps.set-analysis-results-artifact-name.outputs.uploaded-analysis-results-artifact-name }}
    strategy:
      matrix:
        include:
          - os: ubuntu-latest
            java: 17
            # Version numbers are quoted: an unquoted value like 3.10 would be
            # parsed as the YAML float 3.1 and silently select the wrong version.
            python: "3.11"
            miniforge: "24.9.0-0"
    steps:
      - name: Assure that either artifacts-upload-name or sources-upload-name is set
        if: inputs.artifacts-upload-name == '' && inputs.sources-upload-name == ''
        run: echo "Please specify either the input parameter 'artifacts-upload-name' or 'sources-upload-name'."; exit 1

      # Pinned to a specific commit for reproducible analysis results.
      - name: Checkout code-graph-analysis-pipeline
        uses: actions/checkout@v4
        with:
          repository: JohT/code-graph-analysis-pipeline
          ref: 41f3e22b5bd65351474dd23effeee91fab849a12
          path: code-graph-analysis-pipeline
          persist-credentials: false

      - name: (Java Setup) Java Development Kit (JDK) ${{ matrix.java }}
        uses: actions/setup-java@v4
        with:
          distribution: "temurin"
          java-version: ${{ matrix.java }}

      # "Setup Python" can be skipped if jupyter notebook analysis-results aren't needed
      - name: (Python Setup) Setup Cache for Conda package manager Miniforge
        uses: actions/cache@v4
        env:
          # Increase this value to reset cache if etc/example-environment.yml has not changed
          # Reference: https://github.com/conda-incubator/setup-miniconda#caching
          CACHE_NUMBER: 0
        with:
          path: ~/conda_pkgs_dir
          key:
            ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-environments-${{ hashFiles('**/environment.yml', '.github/workflows/*.yml') }}

      - name: (Python Setup) Use version ${{ matrix.python }} with Conda package manager Miniforge
        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python }}
          miniforge-version: ${{ matrix.miniforge }}
          activate-environment: codegraph
          environment-file: ./code-graph-analysis-pipeline/jupyter/environment.yml
          auto-activate-base: false
          use-only-tar-bz2: true # IMPORTANT: This needs to be set for caching to work properly!

      - name: (Python Setup) Conda environment info
        shell: bash -el {0}
        run: conda info

      # Keeps the checked-out pipeline out of the analyzed repository's own git status.
      - name: (Code Analysis Setup) Add code-graph-analysis-pipeline temporarily to .gitignore
        shell: bash
        run: |
          echo "" >> .gitignore
          echo "# Code Graph Analysis Pipeline" >> .gitignore
          echo "code-graph-analysis-pipeline/" >> .gitignore

      - name: (Code Analysis Setup) Setup Cache Analysis Downloads
        uses: actions/cache@v4
        with:
          path: ./code-graph-analysis-pipeline/temp/downloads
          key:
            ${{ runner.os }}-${{ hashFiles('**/*.sh') }}

      # A random password is generated per run and masked in the logs.
      - name: (Code Analysis Setup) Generate Neo4j Initial Password
        id: generate-neo4j-initial-password
        shell: bash
        run: |
          generated_password=$( LC_ALL=C tr -dc '[:graph:]' </dev/urandom | head -c 12; echo )
          echo "::add-mask::$generated_password"
          echo "neo4j-initial-password=$generated_password" >> "$GITHUB_OUTPUT"

      - name: (Code Analysis Setup) Initialize Analysis
        shell: bash
        working-directory: code-graph-analysis-pipeline
        env:
          NEO4J_INITIAL_PASSWORD: ${{ steps.generate-neo4j-initial-password.outputs.neo4j-initial-password }}
        run: ./init.sh ${{ inputs.analysis-name }}

      - name: (Code Analysis Setup) Download sources for analysis
        if: inputs.sources-upload-name != ''
        uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.sources-upload-name }}
          path: code-graph-analysis-pipeline/temp/${{ inputs.analysis-name }}/source/${{ inputs.analysis-name }}

      - name: (Code Analysis Setup) Download artifacts for analysis
        if: inputs.artifacts-upload-name != ''
        uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.artifacts-upload-name }}
          path: code-graph-analysis-pipeline/temp/${{ inputs.analysis-name }}/artifacts

      - name: (Code Analysis) Analyze ${{ inputs.analysis-name }}
        working-directory: code-graph-analysis-pipeline/temp/${{ inputs.analysis-name }}
        # Shell type can be skipped if jupyter notebook analysis-results (and therefore conda) aren't needed
        shell: bash -el {0}
        env:
          NEO4J_INITIAL_PASSWORD: ${{ steps.generate-neo4j-initial-password.outputs.neo4j-initial-password }}
          ENABLE_JUPYTER_NOTEBOOK_PDF_GENERATION: "true"
          IMPORT_GIT_LOG_DATA_IF_SOURCE_IS_PRESENT: "" # Options: "none", "aggregated", "full". default = "plugin" or ""
        run: |
          TYPESCRIPT_SCAN_HEAP_MEMORY=${{ inputs.typescript-scan-heap-memory }} ./../../scripts/analysis/analyze.sh ${{ inputs.analysis-arguments }}

      # Fix: the value previously started with a bare hyphen ("-17-python-..."),
      # which produced a malformed double-hyphen artifact name ("...-java--17-...") below.
      - name: Assemble ENVIRONMENT_INFO
        run: echo "ENVIRONMENT_INFO=java-${{ matrix.java }}-python-${{ matrix.python }}-miniforge-${{ matrix.miniforge }}" >> "$GITHUB_ENV"

      - name: Set artifact name for uploaded analysis results
        id: set-analysis-results-artifact-name
        run: echo "uploaded-analysis-results-artifact-name=code-analysis-results-${{ env.ENVIRONMENT_INFO }}" >> "$GITHUB_OUTPUT"

      # Upload logs and unfinished analysis-results in case of an error for troubleshooting.
      # Uses matrix values directly because ENVIRONMENT_INFO isn't assembled yet when earlier steps fail.
      - name: (Code Analysis Results) Archive failed run with logs and unfinished analysis-results
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: java-code-analysis-logs-java-${{ matrix.java }}-python-${{ matrix.python }}-miniforge-${{ matrix.miniforge }}
          path: |
            ./code-graph-analysis-pipeline/temp/**/runtime/*
            ./code-graph-analysis-pipeline/temp/**/reports/*
          retention-days: 5

      # Upload successful analysis-results in case they are needed for troubleshooting
      - name: (Code Analysis Results) Archive successful analysis-results
        if: success()
        uses: actions/upload-artifact@v4
        with:
          name: ${{ steps.set-analysis-results-artifact-name.outputs.uploaded-analysis-results-artifact-name }}
          path: ./code-graph-analysis-pipeline/temp/**/reports/*
          if-no-files-found: error
          retention-days: 5

      # Upload Database Export
      # Only possible after an export with "./../../scripts/analysis/analyze.sh --report DatabaseCsvExport"
      # Won't be done here because of performance and security concerns
      #- name: Archive exported database
      #  uses: actions/upload-artifact@v3
      #  with:
      #    name: typescript-code-analysis-database-export-${{ matrix.java }}-python-${{ matrix.python }}-miniforge-${{ matrix.miniforge }}
      #    path: ./code-graph-analysis-pipeline/temp/**/import
      #    if-no-files-found: error
      #    retention-days: 5
39 changes: 39 additions & 0 deletions .github/workflows/check-links-in-documentation.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
name: Check links in documentation

on:
  pull_request:
    branches:
      - main
    # Only watch root level Markdown documentation file changes
    paths:
      - 'README.md'
      - '.github/workflows/check-links-in-documentation.yml' # also run when this file was changed
  schedule:
    - cron: "15 6 1 * *" # On the first day of each month at 6:15 o'clock

jobs:
  analysis-results:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout GIT Repository
        uses: actions/checkout@v4

      - name: Setup node.js
        uses: actions/setup-node@v4
        with:
          node-version-file: '.nvmrc'

      # Renovate PRs that bump AxonFramework update links to analysis results
      # that don't exist yet at check time, so the link check would fail spuriously.
      - name: Skip on update of AxonFramework by bot (Renovate)
        if: |
          github.event_name == 'pull_request' &&
          startsWith(github.event.pull_request.title, 'Update dependency AxonFramework') &&
          github.event.pull_request.user.type == 'Bot'
        run: |
          echo "Skipping link check on AxonFramework updates since the updated links to the analysis-results will only be active after the results for the new version have been published."
          echo "skip_link_check=true" >> "$GITHUB_ENV"

      # env.skip_link_check is unset (empty string, falsy) unless the step above ran.
      - name: Check links in top level documentation Markdown files
        if: ${{ ! env.skip_link_check }}
        run: npx --yes [email protected] --verbose --alive=200,202,206 --retry README.md
        # Temporarily, everything is done using command line options rather than with the config file, which doesn't seem to work.
        # Maybe related to https://github.com/tcort/markdown-link-check/issues/379 ?
24 changes: 24 additions & 0 deletions .github/workflows/check-renovate-config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
name: Check Renovate Configuration

# Validates the repository's Renovate configuration whenever it changes,
# so broken config is caught in the pull request instead of silently
# disabling dependency updates.
on:
  pull_request:
    branches:
      - main
    # Only watch root level Renovate configuration changes
    paths:
      - 'renovate.json*'

jobs:
  analysis-results:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout GIT Repository
        uses: actions/checkout@v4

      # Node version is taken from the repository's .nvmrc file to keep
      # local and CI environments in sync.
      - name: Setup node.js
        uses: actions/setup-node@v4
        with:
          node-version-file: '.nvmrc'

      # Runs the validator bundled with the pinned renovate package against
      # the renovate.json* file(s) in the repository root.
      - name: Run renovate-config-validator
        run: npx --yes --package [email protected] -- renovate-config-validator
Loading
Loading