diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 29de6ff8a3..0000000000 --- a/.coveragerc +++ /dev/null @@ -1,5 +0,0 @@ -[html] -directory = coverage - -[run] -data_file = .coverage_$LOCAL_RANK diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000..1e35e0c496 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +max-line-length = 100 +extend-ignore = E203,E501,F401,E402,E714 +per-file-ignores = __init__.py:F401 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..10eef953d5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,28 @@ +--- +name: Bug report +about: Create a report to help us improve the repository or project +title: "" +labels: bug +assignees: '' + +--- + +**Describe the bug** + +A clear and concise description of what the bug is. + +**Steps/Code to reproduce bug** + +Please list *minimal* steps or code snippet for us to be able to reproduce the bug. + +A helpful guide on on how to craft a minimal bug report http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports. + + +**Expected behavior** + +A clear and concise description of what you expected to happen. + + +**Additional context** + +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000..99d680b0ab --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,2 @@ +blank_issues_enabled: false + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..7334f687d1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: "" +labels: enhancement +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000000..b3d89a0ac1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,12 @@ +--- +name: QUESTION +about: Ask a question about Megatron-LM that is not a bug, regression or enhancement + request +title: "[QUESTION]" +labels: '' +assignees: '' + +--- + +**Your question** +Ask a clear and concise question about Megatron-LM. diff --git a/.github/ISSUE_TEMPLATE/regression.md b/.github/ISSUE_TEMPLATE/regression.md new file mode 100644 index 0000000000..10078d23a6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/regression.md @@ -0,0 +1,39 @@ +--- +name: REGRESSION +about: Report a regression in speed or accuracy due to a Megatron-LM update +title: "[REGRESSION]" +labels: '' +assignees: '' + +--- + +**Describe the regression** +A clear and concise description of what the regression is. + +**To Reproduce** +Steps to reproduce the behavior. The easier it is to reproduce the faster it will get maintainer attention. + +**Previous performance** +What speed or accuracy did you previously see. 
+ +**New performance** +What speed or accuracy do you see after the update. + +**Stack trace/logs** +If applicable, add the stack trace or logs related to the regression. + +**Environment (please complete the following information):** + - Previous Megatron-LM commit ID + - New Megatron-LM commit ID + - Previous PyTorch version + - New PyTorch version + - Previous CUDA version + - New CUDA version + - Previous NCCL version + - New NCCL version + +**Proposed fix** +If you have a proposal for how to fix the issue state it here or link to a PR. + +**Additional context** +Add any other context about the problem here. diff --git a/.github/actions/action.yml b/.github/actions/action.yml new file mode 100644 index 0000000000..6a226d9e05 --- /dev/null +++ b/.github/actions/action.yml @@ -0,0 +1,292 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name: "Test Template" +description: "Template for running NeMo tests in a containerized environment" + +inputs: + timeout: + description: "Max runtime of test in minutes" + required: false + default: "30" + script: + description: "Test script to execute" + required: true + is-optional: + description: "Pass this job on failure." + required: false + default: "false" + is_unit_test: + description: "Upload coverage as unit test" + required: false + default: "false" + tag: + description: Latest or legacy test suite + required: true + test_case: + description: Test case to launch + required: true + model: + description: Model to launch + required: false + PAT: + description: "GitHub Personal Access Token" + required: true + +runs: + using: "composite" + steps: + - name: Copy data + shell: bash + if: inputs.is_unit_test == 'false' + env: + SOURCE_DIR: /mnt/datadrive/TestData/megatron-lm/artifacts + TARGET_DIR: /home/runner/_work/TestData/megatron-lm/artifacts + MODEL: ${{ inputs.model }} + run: | + mkdir -p $TARGET_DIR/text/data/ + + if [[ "$MODEL" == "bert" ]]; then + mkdir -p $TARGET_DIR/text/the_pile/bert_shard00/ + cp -a $SOURCE_DIR/text/the_pile/bert_shard00/. $TARGET_DIR/text/data/ + elif [[ "$MODEL" == "gpt" ]] || [[ "$MODEL" == "moe" ]]; then + cp -a $SOURCE_DIR/text/the_pile/shard00/. 
$TARGET_DIR/text/data/ + fi + + - name: Install curl, sudo + shell: bash + run: | + sudo apt-get update + sudo apt-get install -y curl uuid-runtime + + - name: Checkout repository + uses: actions/checkout@v2 + with: + path: ${{ github.workspace }}/Megatron-LM + + - name: Cache uv + uses: actions/cache@v4 + id: cache + with: + path: cache-mount + key: ${{ runner.os }}-uv-${{ hashFiles('**/uv.lock') }} + restore-keys: | + ${{ runner.os }}-uv- + + - name: Restore Docker cache mounts + uses: reproducible-containers/buildkit-cache-dance@5b81f4d29dc8397a7d341dba3aeecc7ec54d6361 + with: + cache-dir: cache-mount + dockerfile: docker/Dockerfile.ci.dev + skip-extraction: ${{ steps.cache.outputs.cache-hit }} + + - name: Setup python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + + - name: Download test data + shell: bash + env: + GH_TOKEN: ${{ inputs.PAT }} + TIMEOUT: ${{ inputs.timeout }} + IS_UNIT_TEST: ${{ inputs.is_unit_test == 'true' }} + run: | + echo "::group::Download test data" + pip install --no-cache-dir pygithub click + python tests/test_utils/python_scripts/download_unit_tests_dataset.py --assets-dir ./assets + echo "::endgroup::" + + - name: Create run-script (unit test) + shell: bash + if: inputs.is_unit_test == 'true' + run: | + echo "::group::Create run-script" + cmd=$(cat <<'RUN_TEST_EOF' + #!/bin/bash + + docker exec -t test_container_${{ github.run_id }} bash -c ' + set -e + bash /opt/megatron-lm/tests/unit_tests/run_ci_test.sh \ + --tag ${{ inputs.tag }} \ + --environment dev \ + --bucket '\''${{ inputs.test_case }}'\'' \ + --log-dir /opt/megatron-lm/outputs/logs + ' + + RUN_TEST_EOF + ) + echo "$cmd" | tee "job.sh" + echo "::endgroup::" + + - name: Create run-script (e2e test) + shell: bash + if: inputs.is_unit_test == 'false' + env: + MODEL: ${{ inputs.model }} + run: | + echo "::group::Create run-script" + cmd=$(cat <<'RUN_TEST_EOF' + #!/bin/bash + + + + docker exec -t test_container_${{ github.run_id }} bash -c ' + + set -e + ls -al /workspace/data + + if [[ "${{ inputs.model }}" == "bert" ]]; then + TRAINING_SCRIPT_PATH=pretrain_bert.py + elif [[ "${{ inputs.model }}" == "gpt" ]] || [[ "${{ inputs.model }}" == "moe" ]]; then + TRAINING_SCRIPT_PATH=pretrain_gpt.py + fi + + ARGUMENTS=( + "DATA_PATH=/workspace/data" + "DATA_CACHE_PATH=/workspace/data/cache" + "OUTPUT_PATH=$(pwd)/outputs/" + "TENSORBOARD_PATH=$(pwd)/tensorboard" + "CHECKPOINT_SAVE_PATH=$(pwd)/checkpoints" + "CHECKPOINT_LOAD_PATH=/workspace/checkpoints/$NAME" + "TRAINING_SCRIPT_PATH=$TRAINING_SCRIPT_PATH" + "TRAINING_PARAMS_PATH=./tests/functional_tests/test_cases/${{inputs.model}}/${{inputs.test_case}}/model_config.yaml" + "GOLDEN_VALUES_PATH=./tests/functional_tests/test_cases/${{inputs.model}}/${{inputs.test_case}}/golden_values_dev_dgx_h100.json" + "N_REPEAT=5" + "ENABLE_LIGHTWEIGHT_MODE=false" + "RECORD_CHECKPOINTS=false" + ) + + bash ./tests/functional_tests/shell_test_utils/run_ci_test.sh ${ARGUMENTS[@]} + ' + + RUN_TEST_EOF + ) + echo "$cmd" | tee "job.sh" + echo "::endgroup::" + + - name: Build container + shell: bash + env: + GH_TOKEN: ${{ inputs.PAT }} + run: | + echo "::group::Build test container" + docker build -f docker/Dockerfile.ci.dev --build-arg FROM_IMAGE_NAME="nvcr.io/nvidia/pytorch:25.06-py3" --target=main -t megatron-core . 
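For local debugging, the same image and unit-test entry point can be exercised outside the workflow. A minimal sketch, assuming a GPU host, the default `megatron-core:latest` tag, and that the image carries the repo at `/opt/megatron-lm` (as the unit-test run-script above implies); the exact flags the workflow uses when starting `test_container` are not reproduced here:

```bash
# Build the CI image the same way the step above does
docker build -f docker/Dockerfile.ci.dev \
  --build-arg FROM_IMAGE_NAME="nvcr.io/nvidia/pytorch:25.06-py3" \
  --target=main -t megatron-core .

# Run one unit-test bucket the way the generated job.sh does
# (--gpus all is an assumption for a local run, not a workflow flag)
docker run --rm --gpus all megatron-core \
  bash /opt/megatron-lm/tests/unit_tests/run_ci_test.sh \
    --tag latest \
    --environment dev \
    --bucket 'tests/unit_tests' \
    --log-dir /opt/megatron-lm/outputs/logs
```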
+ echo "::endgroup::" + + - name: Start container + shell: bash + run: | + echo "::group::Start test container" + set -x + + cmd=$(cat <= MAX_CONCURRENCY: + print("Maximum concurrency reached, no new approvals will be made") + exit(0) + + # Get waiting CI workflows for test environment + print("Fetching deployments...") + pending_workflows = make_request("actions/runs?status=waiting").get("workflow_runs", []) + pending_workflows = [run for run in pending_workflows if run["name"] == "CICD NeMo"] + + # Sort deployments by creation date (oldest first) + print("Sorting workflows...") + pending_workflows = sorted(pending_workflows, key=lambda x: x["created_at"]) + + # Process each deployment + print("Processing ...") + for workflow in pending_workflows: + if total_workflows >= MAX_CONCURRENCY: + print("Maximum concurrency reached, stopping approvals") + break + + workflow_id = workflow["id"] + workflow_name = workflow["display_title"] + print(f"Approving workflow {workflow_name} with Run Id: {workflow_id}") + + deployment_url = f"actions/runs/{workflow_id}/pending_deployments" + deployment = make_request(deployment_url)[0] + environment_id = deployment["environment"]["id"] + + # Approve the deployment + status_data = { + "environment_ids": [environment_id], + "state": "approved", + "comment": "Automatically approved by queue manager" + } + result = make_request(deployment_url, method="POST", data=status_data) + + if result: + total_workflows += 1 + else: + print(f"Failed to approve deployment {deployment['id']}") + exit(1) + + EOF + notify: + if: failure() + runs-on: ubuntu-latest + needs: [approve-queue] + steps: + - name: Notify + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_WEBHOOK_ADMIN: + GITHUB_RUN_ID: ${{ github.run_id }} + GITHUB_REPOSITORY: ${{ github.repository }} + run: | + curl -X POST \ + -H 'Content-type: application/json' \ + --data "{\"text\":\":robot_joy: failed. Please review manually.\n\ncc ${SLACK_WEBHOOK_ADMIN}\"}" \ + $SLACK_WEBHOOK + diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml new file mode 100644 index 0000000000..dd22dd2f75 --- /dev/null +++ b/.github/workflows/cicd-main.yml @@ -0,0 +1,324 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
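The queue-approval script above drives the pending-deployments endpoint of the GitHub Actions REST API. A curl sketch of the same two calls, with `<RUN_ID>` and `<ENVIRONMENT_ID>` as placeholders and `$GH_TOKEN` standing in for whichever token the workflow supplies:

```bash
# List the deployments a waiting workflow run is blocked on
curl -s \
  -H "Authorization: Bearer $GH_TOKEN" \
  -H "Accept: application/vnd.github+json" \
  "https://api.github.com/repos/$GITHUB_REPOSITORY/actions/runs/<RUN_ID>/pending_deployments"

# Approve one environment, mirroring the script's payload
curl -s -X POST \
  -H "Authorization: Bearer $GH_TOKEN" \
  -H "Accept: application/vnd.github+json" \
  "https://api.github.com/repos/$GITHUB_REPOSITORY/actions/runs/<RUN_ID>/pending_deployments" \
  -d '{"environment_ids": [<ENVIRONMENT_ID>], "state": "approved", "comment": "Automatically approved by queue manager"}'
```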
+name: CICD NeMo +on: + schedule: + - cron: "0 */2 * * *" + push: + branches: + - main + - "pull-request/[0-9]+" + - "deploy-release/*" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-${{ github.event.label.name || 'main' }}-${{ github.event_name }} + cancel-in-progress: true + +permissions: + id-token: write + contents: read + +jobs: + pre-flight: + uses: NVIDIA-NeMo/FW-CI-templates/.github/workflows/_cicd_preflight.yml@v0.53.0 + + linting: + runs-on: ubuntu-latest + needs: [pre-flight] + if: | + !(needs.pre-flight.outputs.is_deployment_workflow == 'true' + || needs.pre-flight.outputs.docs_only == 'true') + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: Install ruff + run: | + pip install ruff + + # - name: Check lint + # run: | + # pip install pre-commit==3.6.0 + # pre-commit install + # pre-commit run --all-files --show-diff-on-failure --color=always + + cicd-wait-in-queue: + runs-on: ubuntu-latest + needs: [pre-flight, linting] + environment: test + if: | + !(needs.pre-flight.outputs.is_ci_workload == 'true' + || needs.pre-flight.outputs.is_deployment_workflow == 'true' + || needs.pre-flight.outputs.docs_only == 'true') + steps: + - name: Running CI tests + run: | + echo "Running CI tests" + + cicd-unit-tests-latest: + strategy: + fail-fast: false + matrix: + include: + - bucket: "unit_tests" + - bucket: "unit_tests/data/" + - bucket: "unit_tests/dist_checkpointing/*.py" + - bucket: "unit_tests/dist_checkpointing/models/" + - bucket: "unit_tests/transformer/*.py" + - bucket: "unit_tests/transformer/moe" + needs: [pre-flight, cicd-wait-in-queue] + runs-on: nvidia-ci-aws-gpu-x8 + name: "${{ matrix.bucket }} - latest" + environment: nemo-ci + if: | + ( + success() + || needs.pre-flight.outputs.is_ci_workload == 'true' + || needs.pre-flight.outputs.force_run_all == 'true' + ) + && !cancelled() + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: main + uses: ./.github/actions + with: + test_case: tests/${{ matrix.bucket }} + tag: latest + timeout: ${{ matrix.timeout || 30 }} + is_unit_test: "true" + PAT: ${{ secrets.PAT }} + + cicd-functional-tests-latest: + strategy: + fail-fast: false + matrix: + include: + - model: "gpt" + test_case: "gpt3_mr_mcore_te_tp1_pp4_vp1_dist_optimizer_overlap_grad_reduce_param_gather_overlap_optimizer_dgx_a100_1N8G" + - model: "gpt" + test_case: "gpt3_mr_mcore_te_tp4_pp1_resume_torch_dist_dist_optimizer_overlap_grad_reduce_param_gather_dgx_a100_1N8G" + - model: "moe" + test_case: "gpt3_moe_mr_mcore_te_tp4_ep2_etp2_pp2_resume_torch_dist_dist_optimizer" + - model: "moe" + test_case: "gpt3_mcore_te_tp2_pp2_ep4_etp1_memory_speed" + needs: + - pre-flight + - cicd-wait-in-queue + - cicd-unit-tests-latest + runs-on: nvidia-ci-aws-gpu-x8 + name: "${{ matrix.model }}/${{ matrix.test_case }} - latest" + environment: nemo-ci + if: | + ( + success() + || needs.pre-flight.outputs.is_ci_workload == 'true' + || needs.pre-flight.outputs.force_run_all == 'true' + ) + && !cancelled() + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: main + uses: ./.github/actions + with: + test_case: ${{ matrix.test_case }} + model: ${{ matrix.model }} + tag: latest + timeout: ${{ matrix.timeout || 30 }} + is_unit_test: "false" + PAT: ${{ secrets.PAT }} + + Nemo_CICD_Test: + needs: + - pre-flight + - cicd-unit-tests-latest + - cicd-functional-tests-latest + if: | + ( + needs.pre-flight.outputs.docs_only == 
'true' + || needs.pre-flight.outputs.is_deployment_workflow == 'true' + || success() + ) + && !cancelled() + runs-on: ubuntu-latest + permissions: write-all + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Get workflow result + id: result + env: + GH_TOKEN: ${{ github.token }} + RUN_ID: ${{ github.run_id }} + run: | + # Get workflow run details and check job conclusions + LATEST_ATTEMPT=$(gh run view $RUN_ID --json jobs -q '[.jobs[] | select(.conclusion != null) | .conclusion] | last') + NUM_FAILED=$(gh run view $RUN_ID --json jobs -q '[.jobs[] | select(.conclusion == "failure") | .name] | length') + NUM_CANCELLED=$(gh run view $RUN_ID --json jobs -q '[.jobs[] | select(.conclusion == "cancelled") | .name] | length') + + if [[ $NUM_FAILED -eq 0 && $NUM_CANCELLED -eq 0 ]]; then + RESULT="success" + elif [[ $NUM_CANCELLED -gt 0 ]]; then + RESULT="cancelled" + else + RESULT="failure" + fi + + # Output the final status + echo "code=$RESULT" | tee -a $GITHUB_OUTPUT + + - name: Checkout for GH CLI + uses: actions/checkout@v4 + + - name: Remove label if not cancelled + if: | + steps.result.outputs.code != 'cancelled' + && github.event.label.name == 'Run CICD' + && github.event.pull_request.head.repo.full_name == github.repository + env: + GH_TOKEN: ${{ github.token }} + PR_NUMBER: ${{ github.event.number }} + run: gh pr edit "$PR_NUMBER" --remove-label "Run CICD" + + - name: Pipeline successful, add PR comment + if: | + steps.result.outputs.code == 'success' + && github.event_name == 'pull_request' + && env.SLACK_WEBHOOK != '' + uses: peter-evans/create-or-update-comment@v4 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + REPOSITORY: ${{ github.repository }} + RUN_ID: ${{ github.run_id }} + with: + issue-number: ${{ github.event.number }} + body: | + [🤖]: Hi @${{ github.event.pull_request.user.login }} 👋, + + We wanted to let you know that a [CICD pipeline](https://github.com/${{ env.REPOSITORY }}/actions/runs/${{ env.RUN_ID }}) for this PR just finished successfully. + + So it might be time to merge this PR or get some approvals. 
+ + //cc @chtruong814 @ko3n1g @pablo-garay @thomasdhc + + - name: "Pipeline not successful and not cancelled: Send Slack alert & create step summary" + if: | + steps.result.outputs.code == 'failure' + && github.event.label.name == 'Run CICD' + && env.SLACK_WEBHOOK != '' + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPOSITORY: ${{ github.repository }} + RUN_ID: ${{ github.run_id }} + PR_NUMBER: ${{ github.event.number }} + SERVER_URL: ${{ github.server_url }} + run: | + set -x + pip install PyGithub + export BRANCH_NAME=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}} + + python .github/scripts/notify.py + + - name: Exit + if: ${{ always() }} + env: + RESULT: ${{ steps.result.outputs.code }} + run: | + if [ $RESULT == "success" ]; then + exit 0 + else + exit 1 + fi + + Coverage_Fake: + runs-on: ubuntu-latest + needs: [Nemo_CICD_Test, pre-flight] + if: | + (needs.pre-flight.outputs.docs_only == 'true' + || needs.pre-flight.outputs.is_deployment_workflow == 'true') + && !cancelled() + environment: nemo-ci + steps: + - name: Generate fake coverage report + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.PAT }} + script: | + await github.rest.repos.createCommitStatus({ + owner: context.repo.owner, + repo: context.repo.repo, + sha: context.sha, + state: 'success', + description: 'No code changes - coverage check skipped', + context: 'codecov/patch' + }); + + Coverage: + runs-on: ubuntu-latest + needs: [Nemo_CICD_Test] + if: | + ( + needs.pre-flight.outputs.docs_only == 'false' + && needs.pre-flight.outputs.is_deployment_workflow == 'false' + && success() + ) + && !cancelled() + strategy: + matrix: + flag: [unit-test, e2e] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Download coverage reports of current branch + uses: actions/download-artifact@v4 + with: + pattern: coverage-${{ matrix.flag }}-* + + - name: List coverage files + run: find . -type f -name "*.xml" -o -name "*.lcov" + + - name: Get total coverage of current branch + shell: bash -x -e -u -o pipefail {0} + if: always() + run: | + pip install coverage + + ls -al . + ls -al coverage-*/ + coverage combine --keep $(ls coverage-*/.coverage) + coverage report -i + rm -rf coverage-* + ls -al + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + verbose: true + flags: ${{ matrix.flag }} + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: coverage-${{ matrix.flag }}-aggregated + path: | + .coverage + include-hidden-files: true diff --git a/.github/workflows/close-inactive-issue-pr.yml b/.github/workflows/close-inactive-issue-pr.yml new file mode 100644 index 0000000000..6eb2f7e113 --- /dev/null +++ b/.github/workflows/close-inactive-issue-pr.yml @@ -0,0 +1,21 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
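The Coverage job above aggregates per-job artifacts with plain coverage.py commands, so the same aggregation can be reproduced locally on downloaded artifacts. A sketch, assuming one `coverage-*` directory per artifact, each containing a `.coverage` data file:

```bash
pip install coverage

# Merge all per-job data files into a single .coverage (--keep leaves the inputs in place)
coverage combine --keep $(ls coverage-*/.coverage)

# Print the combined report; -i ignores files that can no longer be traced
coverage report -i

# Optionally emit an XML report of the kind the Codecov upload step consumes
coverage xml -o coverage.xml
```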
+name: Stale-Close-Inactive-Issues-PRs +on: + schedule: + - cron: "30 1 * * *" + +jobs: + close-issues: + uses: NVIDIA-NeMo/FW-CI-templates/.github/workflows/_close_inactive_issue_pr.yml@v0.44.0 diff --git a/.github/workflows/community-bot.yml b/.github/workflows/community-bot.yml new file mode 100644 index 0000000000..57d482afa3 --- /dev/null +++ b/.github/workflows/community-bot.yml @@ -0,0 +1,26 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name: Community Bot + +on: + issues: + types: [opened, edited, reopened, closed, deleted] + issue_comment: + types: [created, edited, deleted] + +jobs: + community-bot: + uses: NVIDIA-NeMo/FW-CI-templates/.github/workflows/_community_bot.yml@v0.49.1 + secrets: + GH_TOKEN: ${{ secrets.PAT }} diff --git a/.github/workflows/copyright-check.yml b/.github/workflows/copyright-check.yml new file mode 100644 index 0000000000..5cd588e31b --- /dev/null +++ b/.github/workflows/copyright-check.yml @@ -0,0 +1,47 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: Copyright check + +on: + push: + branches: + - main + - "pull-request/[0-9]+" + - "deploy-release/*" + +jobs: + pre-flight: + uses: NVIDIA-NeMo/FW-CI-templates/.github/workflows/_cicd_preflight.yml@v0.53.0 + + copyright-check: + needs: [pre-flight] + if: | + !(needs.pre-flight.outputs.docs_only == 'true' + || needs.pre-flight.outputs.is_deployment_workflow == 'true') + uses: NVIDIA-NeMo/FW-CI-templates/.github/workflows/_copyright_check.yml@v0.2.0 + + copyright-check-summary: + needs: [pre-flight, copyright-check] + if: | + ( + needs.pre-flight.outputs.docs_only == 'true' + || needs.pre-flight.outputs.is_deployment_workflow == 'true' + || success() + ) + && !cancelled() + runs-on: ubuntu-latest + steps: + - name: Result + run: echo Copyright check successful diff --git a/.gitignore b/.gitignore index cac3499524..144a8c2b89 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,13 @@ build *~ slurm* logs +.vscode +local/ +.gitmodules +wandb/ +onelogger.log +onelogger.err +.venv +runs/ +/test_cases/ +**/dist/ \ No newline at end of file diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3cd1c2f2e6..be4b658f2d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,299 +1,269 @@ -image: gitlab-master.nvidia.com:5005/adlr/megatron-lm/ngc/pytorch:22.12-py3_pytest-cov +.merge_train_rule: &merge_train_rule + UNIT_TEST: "yes" + UNIT_TEST_REPEAT: 1 + UNIT_TEST_TIMEOUT: 30 + INTEGRATION_TEST: "no" + INTEGRATION_TEST_SCOPE: mr + FUNCTIONAL_TEST: "yes" + FUNCTIONAL_TEST_SCOPE: mr-slim + FUNCTIONAL_TEST_REPEAT: 5 + FUNCTIONAL_TEST_TIME_LIMIT: 2700 + CLUSTER_A100: "" + CLUSTER_H100: "" + PUBLISH: "no" -stages: - - test - - cleanup +workflow: + rules: + # Do not trigger for forks + - if: $CI_PROJECT_NAMESPACE != "ADLR" || ($CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_PROJECT_PATH != "ADLR/megatron-lm") + when: never -variables: &VARS - SELENE_ADLR_CI_PATH: "/lustre/fsw/adlr/adlr-nlp/adlr_ci/megatron" - DATA_DIR: "/lustre/fsw/adlr/adlr-nlp/adlr_ci/megatron/data" - PYTORCH_IMAGE: gitlab-master.nvidia.com:5005/adlr/megatron-lm/ngc/pytorch:22.12-py3_pytest-cov - PYTHON_VIRTUAL_ENV: /lustre/fsw/adlr/adlr-nlp/adlr_ci/cienv/bin/activate - TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED: L0 # Can specify levels - TESTS_TO_RUN_AFTER_MERGING: L0 # Can specify levels - TESTS_TO_RUN_ON_THIS_COMMIT: unit_tests - TEST_REGEX_ON_THIS_COMMIT: NONE #https://github.com/google/re2/wiki/Syntax (Can define regex as in this spec) e.g /.*gpt3.*/ - DISPLAY_OUTPUT: "True" # Set to true for new tests to copy the logs for creating golden truth file + # ci-branches only for schedule + - if: $CI_COMMIT_BRANCH =~ /ci-/ && $CI_PIPELINE_SOURCE != "schedule" + when: never -unit_tests: - tags: - - docker_local_runner - stage: test - script: - - torchrun --nproc_per_node=8 -m pytest --cov-report=term --cov-report=html --cov=megatron/core tests/unit_tests - coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/' - artifacts: - paths: - - coverage - expire_in: 30 days - only: - - merge_requests + # For schedules pipelines + - if: $CI_PIPELINE_SOURCE == "schedule" + auto_cancel: + on_new_commit: none -.selene_test_resume_checkpoint_launcher: &selene-test-resume-checkpoint-launcher - tags: - - ssh_selene_runner - stage: test - script: &selene-test-resume-launcher-script - - echo "Running selene resume from checkpoint test. 
" - - pwd - - export BUILD_DIR=`pwd` - - export RUN_NAME=resume_${RUN_MODEL}_tp${TP_SIZE}_pp${PP_SIZE}_${NUM_NODES}nodes - - echo "In case of error check ${SELENE_ADLR_CI_PATH}/${CI_PIPELINE_ID}/${RUN_NAME}/results directory for result logs." - - export TP_SIZE PP_SIZE NUM_NODES MAX_STEPS - - export DATA_DIR=$DATA_DIR - - echo "Run name is $RUN_NAME" - - mkdir -p $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/checkpoints - - mkdir -p $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/logs - - mkdir -p $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/results - - rm -rf $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/checkpoints/* - - rm -rf $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/logs/* - - rm -rf $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/results/* - - export BASE_DIR=$SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME - - export LOGS_DIR=$BASE_DIR/logs - - export RESULTS_DIR=$BASE_DIR/results - - export CHECKPOINTS_DIR=$BASE_DIR/checkpoints - - echo "Submitting job" - - sbatch_submission=`sbatch $BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_resume_checkpoint_test.sh --export=BASE_DIR,BUILD_DIR,DATA_DIR,TP_SIZE,PP_SIZE,NUM_NODES` - - export SLURM_JOBID=$(echo $sbatch_submission| grep 'Submitted batch job' | awk '{ print $4 }'); - - bash $BUILD_DIR/tests/functional_tests/shell_test_utils/jobwait.sh $SLURM_JOBID - - \[ ! -z ${SLURM_JOBID} \] && echo -e " --------------------------------------------------\n" - "----------WAITING FOR SLURM JOB TO BEGIN-----------\n" - "---------------------------------------------------\n" - "$(scontrol show job=${SLURM_JOBID})\n" - "---------------------------------------------------\n" - # Gitlab logs collapsible section markers - - echo -e "\e[0Ksection_end:`date +%s`:slurm_setup\r\e[0K" - # Follow output of the job - - echo "Finished job" - - export SLURM_STATE=$(sacct -j "${SLURM_JOBID}" --format State --parsable2 --noheader |& head -n 1) - - echo "Slurm job state $SLURM_STATE" - - if [[ "$SLURM_STATE" != "COMPLETED" ]]; then echo "Slurm job did not complete. See ${SELENE_ADLR_CI_PATH}/${CI_PIPELINE_ID}/${RUN_NAME}/results directory for result logs. Skipping pytest."; exit 1; fi - - source $PYTHON_VIRTUAL_ENV - - pytest $BUILD_DIR/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py || echo "Pytest failed. See ${SELENE_ADLR_CI_PATH}/${CI_PIPELINE_ID}/${RUN_NAME}/results directory for result logs." - - echo "Completed the job" - rules: - - if: $TEST_LEVEL =~ $TESTS_TO_RUN_ON_THIS_COMMIT || $CI_JOB_NAME =~ $TESTS_TO_RUN_ON_THIS_COMMIT || $CI_JOB_NAME =~ $TEST_REGEX_ON_THIS_COMMIT - when: always - - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGING' - when: always - - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - when: always - allow_failure: false + # For manual pipelines + - if: $CI_PIPELINE_SOURCE == "web" -.selene_test_launcher: &selene-test-launcher - tags: - - ssh_selene_runner - stage: test - script: &selene-test-launcher-script - - echo "Running selene test" - - echo "$CI_MERGE_REQUEST_APPROVED" - - pwd - - export BUILD_DIR=`pwd` - - RUN_NAME=${RUN_MODEL}_tp${TP_SIZE}_pp${PP_SIZE}_${NUM_NODES}nodes_${MAX_STEPS}steps - - if [[ $USE_TE == 1 ]]; then RUN_NAME=${RUN_NAME}_te_enabled; fi - - export $RUN_NAME - - echo "In case of error check ${SELENE_ADLR_CI_PATH}/${CI_PIPELINE_ID}/${RUN_NAME}/results directory for result logs." 
- - export USE_TE TP_SIZE PP_SIZE NUM_NODES MAX_STEPS VP_SIZE - - export MBS GBS - - export DATA_DIR=$DATA_DIR - - echo "Run name is $RUN_NAME" - - mkdir -p $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/checkpoints - - mkdir -p $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/logs - - mkdir -p $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/results - - rm -rf $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/checkpoints/* - - rm -rf $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/logs/* - - rm -rf $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/results/* - - export BASE_DIR=$SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME - - export LOGS_DIR=$BASE_DIR/logs - - export RESULTS_DIR=$BASE_DIR/results - - export CHECKPOINTS_DIR=$BASE_DIR/checkpoints - - echo "Submitting job" - - sbatch_submission=`sbatch $BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh --export=BASE_DIR,BUILD_DIR,DATA_DIR,USE_TE,TP_SIZE,PP_SIZE,NUM_NODES,MAX_STEPS,VP_SIZE,MBS,GBS` - - export SLURM_JOBID=$(echo $sbatch_submission| grep 'Submitted batch job' | awk '{ print $4 }'); - - bash $BUILD_DIR/tests/functional_tests/shell_test_utils/jobwait.sh $SLURM_JOBID - - \[ ! -z ${SLURM_JOBID} \] && echo -e " --------------------------------------------------\n" - "----------WAITING FOR SLURM JOB TO BEGIN-----------\n" - "---------------------------------------------------\n" - "$(scontrol show job=${SLURM_JOBID})\n" - "---------------------------------------------------\n" - # Gitlab logs collapsible section markers - - echo -e "\e[0Ksection_end:`date +%s`:slurm_setup\r\e[0K" - # Follow output of the job - - echo "Finished job" - - echo "Slurm log dump start ------------------------------------------------------------" - - cat $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/results/* - - echo "Slurm log dump end --------------------------------------------------------------" - - python3 $BUILD_DIR/tests/functional_tests/python_test_utils/check_slurm_job_completion.py $SLURM_JOBID - - if [ $? -ne 0 ]; then echo "Slurm job did not complete. See ${SELENE_ADLR_CI_PATH}/${CI_PIPELINE_ID}/${RUN_NAME}/results directory for result logs. Skipping pytest."; exit 1; fi - - source $PYTHON_VIRTUAL_ENV - - | - if [[ "$DISPLAY_OUTPUT" == "True" ]]; then - python3 $BUILD_DIR/tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py $LOGS_DIR $RUN_NAME - fi - - | - if [[ $USE_TE -ne 1 ]]; then - echo "Checking against ground truth file" - export EXPECTED_METRICS_FILE=$BUILD_DIR/tests/functional_tests/test_results/$RUN_MODEL/$RUN_NAME.json - pytest $BUILD_DIR/tests/functional_tests/python_test_utils/test_ci_pipeline.py || echo "Pytest failed. See ${SELENE_ADLR_CI_PATH}/${CI_PIPELINE_ID}/${RUN_NAME}/results directory for result logs." 
- fi - - echo "Completed the job" - rules: - - if: $TEST_LEVEL =~ $TESTS_TO_RUN_ON_THIS_COMMIT || $CI_JOB_NAME =~ $TESTS_TO_RUN_ON_THIS_COMMIT || $CI_JOB_NAME =~ $TEST_REGEX_ON_THIS_COMMIT - when: always - - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGING' - when: always - - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - when: always - allow_failure: false + # For push to main + - if: $CI_PIPELINE_SOURCE == 'push' && ($CI_COMMIT_BRANCH == "main" || $CI_COMMIT_BRANCH == "dev") + variables: + UNIT_TEST: "no" + INTEGRATION_TEST: "no" + FUNCTIONAL_TEST: "yes" + FUNCTIONAL_TEST_SCOPE: mr + FUNCTIONAL_TEST_REPEAT: 5 + FUNCTIONAL_TEST_RECORD_CHECKPOINTS: "no" + FUNCTIONAL_TEST_TIME_LIMIT: 2700 + CLUSTER_A100: "" + CLUSTER_H100: "" + PUBLISH: "no" + auto_cancel: + on_new_commit: interruptible -train.te_gpt3.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 1 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "50:00" - TEST_LEVEL: L0 + # For merge-trains that need to be fast-tracked + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merge_train' && $CI_MERGE_REQUEST_LABELS =~ /fast-track/ + variables: + UNIT_TEST: "yes" + UNIT_TEST_REPEAT: 1 + UNIT_TEST_TIMEOUT: 30 + INTEGRATION_TEST: "no" + FUNCTIONAL_TEST: "no" + CLUSTER_A100: "" + CLUSTER_H100: "" + PUBLISH: "no" -train.gpt3.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 + # For normal merge-trains + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merge_train' + variables: *merge_train_rule -train.gpt3.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 + # For MRs with integration suite + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_LABELS =~ /Run tests/ + variables: + UNIT_TEST: "yes" + UNIT_TEST_REPEAT: 1 + UNIT_TEST_TIMEOUT: 30 + INTEGRATION_TEST: "yes" + INTEGRATION_TEST_SCOPE: mr + FUNCTIONAL_TEST: "no" + FUNCTIONAL_TEST_SCOPE: mr-slim + FUNCTIONAL_TEST_REPEAT: 5 + FUNCTIONAL_TEST_TIME_LIMIT: 2700 + CLUSTER_A100: "" + CLUSTER_H100: "" + PUBLISH: "no" -train.gpt3.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 + # For MRs with nightly + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_LABELS =~ /Run nightly/ + variables: + UNIT_TEST: "yes" + UNIT_TEST_REPEAT: 1 + UNIT_TEST_TIMEOUT: 30 + INTEGRATION_TEST: "no" + FUNCTIONAL_TEST: "yes" + FUNCTIONAL_TEST_SCOPE: nightly + FUNCTIONAL_TEST_REPEAT: 5 + FUNCTIONAL_TEST_RECORD_CHECKPOINTS: "no" + FUNCTIONAL_TEST_TIME_LIMIT: 2700 + CLUSTER_A100: "" + CLUSTER_H100: "" + PUBLISH: "no" -train.gpt3.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 + # For MRs with weekly + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_LABELS =~ /Run weekly/ + variables: + UNIT_TEST: "yes" + UNIT_TEST_REPEAT: 1 + UNIT_TEST_TIMEOUT: 30 + 
INTEGRATION_TEST: "no" + FUNCTIONAL_TEST: "yes" + FUNCTIONAL_TEST_SCOPE: weekly + FUNCTIONAL_TEST_REPEAT: 1 + FUNCTIONAL_TEST_RECORD_CHECKPOINTS: "no" + FUNCTIONAL_TEST_TIME_LIMIT: 9000 + CLUSTER_A100: "" + CLUSTER_H100: "" + PUBLISH: "no" -resume.checkpoint.gpt3.345m_tp1_pp2_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - TIME_LIMIT: "30:00" - TEST_LEVEL: L0 + # For MRs with heavy suite + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_LABELS =~ /Run functional tests/ + variables: + UNIT_TEST: "yes" + UNIT_TEST_REPEAT: 1 + UNIT_TEST_TIMEOUT: 30 + INTEGRATION_TEST: "no" + FUNCTIONAL_TEST: "yes" + FUNCTIONAL_TEST_SCOPE: mr + FUNCTIONAL_TEST_REPEAT: 5 + FUNCTIONAL_TEST_TIME_LIMIT: 2700 + CLUSTER_A100: "" + CLUSTER_H100: "" + PUBLISH: "no" -train.bert.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 + # Default MRs + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' + variables: + UNIT_TEST: "yes" + UNIT_TEST_REPEAT: 1 + UNIT_TEST_TIMEOUT: 30 + INTEGRATION_TEST: "no" + FUNCTIONAL_TEST: "no" + PUBLISH: "no" -train.bert.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 + - when: never -train.bert.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 + auto_cancel: + on_new_commit: interruptible -train.bert.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 +stages: + - build + - test + - integration_tests + - functional_tests + - publish -resume.checkpoint.bert.345m_tp1_pp2_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - TIME_LIMIT: "30:00" - TEST_LEVEL: L0 +default: + interruptible: true + retry: + max: 2 + when: runner_system_failure -cleanup.selene: - tags: - - ssh_selene_runner - stage: cleanup - variables: - <<: [*VARS] - script: - - set +e - - NUM_CLEANUP=`find ${SELENE_ADLR_CI_PATH}/* -type d -ctime +20 | grep -v data | wc -l` - - find ${SELENE_ADLR_CI_PATH}/* -type d -ctime +20 | grep -v data | xargs rm -rf - - echo "Finished cleaning $NUM_CLEANUP directories older than 20 days everything in Selene" - allow_failure: true - rules: - - when: always +variables: + UNIT_TEST: + value: "yes" + options: + - "yes" + - "no" + description: To run the funtional test suite + UNIT_TEST_REPEAT: + value: "1" + description: "Number of repetitions" + UNIT_TEST_TIMEOUT: + value: "30" + description: Timeout (minutes) for Unit tests (all repeats) + INTEGRATION_TEST: + value: "yes" + options: + - "yes" + - "no" + description: To run the integration test suite + INTEGRATION_TEST_SCOPE: + value: "mr" + options: + - "mr" + - "nightly" + - "weekly" + - "pre-release" + - "release" + description: "Testsuite to run (only for INTEGRATION_TEST=yes)" + INTEGRATION_TEST_TIME_LIMIT: + value: "900" + description: "Timeout in seconds per test" + INTEGRATION_TEST_CASES: + value: "all" + description: "Comma-separated list of 
test_cases to run. Use 'all' to run the full suite." + FUNCTIONAL_TEST: + value: "yes" + options: + - "yes" + - "no" + description: To run the funtional test suite + FUNCTIONAL_TEST_SCOPE: + value: "mr" + options: + - "mr" + - "nightly" + - "weekly" + - "pre-release" + - "release" + description: "Testsuite to run (only for FUNCTIONAL_TEST=yes)" + FUNCTIONAL_TEST_REPEAT: + value: "5" + description: "Number of repetitions per test" + FUNCTIONAL_TEST_TIME_LIMIT: + value: "2700" + description: "Timeout in seconds per test" + FUNCTIONAL_TEST_CASES: + value: "all" + description: "Comma-separated list of test_cases to run. Use 'all' to run the full suite." + FUNCTIONAL_TEST_NAME: + description: "Name of functional test run (only for pre-release and release)" + value: "$$CI_COMMIT_SHA" + FUNCTIONAL_TEST_RECORD_CHECKPOINTS: + value: "no" + description: "Record golden checkpoints" + options: + - "yes" + - "no" + CLUSTER_A100: + value: "dgxa100_dracooci" + options: + - "dgxa100_dracooci" + - "dgxa100_dracooci-ord" + description: "Cluster for A100 workloads" + CLUSTER_H100: + value: "dgxh100_coreweave" + options: + - "dgxh100_coreweave" + - "dgxh100_eos" + description: "Cluster for H100 workloads" + PUBLISH: + value: "no" + options: + - "yes" + - "no" + description: Build and publish a wheel to PyPi + PUBLISH_COMMIT: + value: "$$CI_COMMIT_SHA" + description: Which commit to publish + PUBLISH_VERSION_BUMP_BRANCH: + value: "$$CI_COMMIT_BRANCH" + description: Which branch to target for version bump + PUBLISH_SCOPE: + value: "code-freeze" + options: + - "code-freeze" + - "release" + - "review-reminder" + - "upgrade-dependencies" + description: Type of publish (freeze or final release) + + # CI wide variables + CI_MCORE_LTS_IMAGE: ${GITLAB_ENDPOINT}:5005/adlr/megatron-lm/mcore_ci_lts + CI_MCORE_DEV_IMAGE: ${GITLAB_ENDPOINT}:5005/adlr/megatron-lm/mcore_ci_dev + CI_NEMO_IMAGE: ${GITLAB_ENDPOINT}:5005/adlr/megatron-lm/nemo_ci + UTILITY_IMAGE: ${GITLAB_ENDPOINT}:5005/adlr/megatron-lm/mcore_utility + TE_GIT_REF: "" + +include: + - .gitlab/stages/00.pre.yml + - .gitlab/stages/01.build.yml + - .gitlab/stages/02.test.yml + - .gitlab/stages/03.integration-tests.yml + - .gitlab/stages/04.functional-tests.yml + - .gitlab/stages/05.publish.yml diff --git a/.gitlab/labeler-config.yml b/.gitlab/labeler-config.yml new file mode 100644 index 0000000000..0e218e4bae --- /dev/null +++ b/.gitlab/labeler-config.yml @@ -0,0 +1,36 @@ +CI: + - .gitlab-ci.yml + - Dockerfile.ci.lts + - Dockerfile.ci.dev + - .github/** + - .gitlab/** + +Datasets: + - megatron/core/datasets/** + +BERT: + - megatron/core/models/bert/** + +GPT: + - megatron/core/models/gpt/** + +RETRO: + - megatron/core/models/retro/** + +Dist-Ckpt: + - megatron/core/dist_checkpointing + +Dist-Opt: + - megatron/core/optimizer/distrib_optimizer + +Inference: + - megatron/core/inference + +MoE: + - megatron/core/transformer/moe + +Tests: + - tests/** + +ParallelState: + - megatron/core/parallel_state.py diff --git a/.gitlab/scripts/build.sh b/.gitlab/scripts/build.sh new file mode 100644 index 0000000000..960af10462 --- /dev/null +++ b/.gitlab/scripts/build.sh @@ -0,0 +1,57 @@ +#! /bin/bash + +set -x +env +eval "IMAGE=\$$IMAGE" + +# Start a named container in detached mode +docker run -d --name download_test_data -w /workdir/ python:3.12-slim bash -c 'sleep infinity' +docker cp tests/. 
download_test_data:/workdir/tests +docker exec -e GH_TOKEN=$GH_TOKEN download_test_data bash -c ' + ls -al /workdir/ + pip install --no-cache-dir pygithub click + python tests/test_utils/python_scripts/download_unit_tests_dataset.py --assets-dir ./assets +' +docker cp download_test_data:/workdir/assets ./ +docker rm -f download_test_data + +docker context create tls-environment +docker buildx create --name container --driver=docker-container --use tls-environment + +ADDITIONAL_PARAMS=() + +if [[ "$CI_COMMIT_BRANCH" == "ci-rebuild-mcore-nemo-image" || "$CI_COMMIT_BRANCH" == "main" || "$CI_COMMIT_BRANCH" == "dev" ]]; then + ADDITIONAL_PARAMS+=("--pull") + ADDITIONAL_PARAMS+=("--cache-to type=registry,ref=${IMAGE}-buildcache:main,mode=max") + ADDITIONAL_PARAMS+=("-t ${IMAGE}:${CI_COMMIT_BRANCH}") +elif [[ -n "$CI_MERGE_REQUEST_IID" ]]; then + ADDITIONAL_PARAMS+=("--cache-to type=registry,ref=${IMAGE}-buildcache:${CI_MERGE_REQUEST_IID},mode=max") + ADDITIONAL_PARAMS+=("-t ${IMAGE}:${CI_MERGE_REQUEST_IID}") +fi + +if [[ "$CI_COMMIT_BRANCH" == "ci-nightly" ]]; then + ADDITIONAL_PARAMS+=("-t ${IMAGE}:nightly") +fi + +if [[ -n "$TE_GIT_REF" ]]; then + ADDITIONAL_PARAMS+=("--build-arg TE_COMMIT=${TE_GIT_REF}") +fi + +echo $(git rev-parse HEAD) + +JET_API_VERSION=$(curl -s -u "$ARTIFACTORY_USER:$ARTIFACTORY_TOKEN" "https://sc-hw-artf.nvidia.com/artifactory/api/pypi/hw-joc-pypi/simple/jet-api/" | grep -o 'href="../../jet-api/[0-9.]*/' | sed 's|href="../../jet-api/||;s|/||' | sort -V -r | head -n1) + +DOCKER_BUILDKIT=1 docker build \ + --secret id=JET_INDEX_URLS \ + --secret id=LOGGER_INDEX_URL \ + --target $STAGE \ + -f docker/$FILE \ + -t ${IMAGE}:${CI_PIPELINE_ID} \ + --builder=container \ + --build-arg JET_API_VERSION=$JET_API_VERSION \ + --cache-from type=registry,ref=${IMAGE}-buildcache:${CI_MERGE_REQUEST_IID} \ + --cache-from type=registry,ref=${IMAGE}-buildcache:main \ + --build-arg FROM_IMAGE_NAME=$BASE_IMAGE \ + --push \ + --progress plain \ + ${ADDITIONAL_PARAMS[@]} . diff --git a/.gitlab/scripts/check_imports.py b/.gitlab/scripts/check_imports.py new file mode 100644 index 0000000000..f46987d8d8 --- /dev/null +++ b/.gitlab/scripts/check_imports.py @@ -0,0 +1,208 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#!/usr/bin/env python3 +""" +Import checker script for megatron.hub package. + +This script recursively discovers all Python modules in the specified package +and attempts to import them, reporting any import errors. 
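+
+Example (assumed invocation; --package-name is the only required option):
+
+    python .gitlab/scripts/check_imports.py --package-name megatron.core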
+""" + +import importlib +import os +import sys +import traceback +from typing import Dict, List, Tuple + +import click + + +class ImportChecker: + """Check imports for all modules in a package.""" + + def __init__(self, package_name: str = "megatron.core", verbose: bool = False): + self.package_name = package_name + self.success_count = 0 + self.failure_count = 0 + self.graceful_count = 0 + self.skipped_count = 0 + self.failures: Dict[str, str] = {} + self.successes: List[str] = [] + self.graceful_failures: Dict[str, str] = {} + self.skipped: List[str] = [] + + # Modules to skip (known problematic ones) + self.skip_patterns = { + "__pycache__", + ".pytest_cache", + ".git", + "test_", + "_test", + } + + # Add current directory to Python path if not already there + current_dir = os.getcwd() + if current_dir not in sys.path: + sys.path.insert(0, current_dir) + + def should_skip_module(self, module_name: str) -> bool: + """Check if a module should be skipped.""" + for pattern in self.skip_patterns: + if pattern in module_name: + return True + return False + + def discover_modules(self, package_path: str) -> List[str]: + """Discover all Python modules in the given package path.""" + modules = [] + + package = importlib.import_module(package_path) + package_path = package.__path__[0] + + # Walk through all Python files + for root, dirs, files in os.walk(package.__path__[0]): + # Skip hidden directories and __pycache__ + dirs[:] = [d for d in dirs if not d.startswith(".") and d != "__pycache__"] + + for file in files: + if file.endswith(".py") and not file.startswith("."): + # Convert file path to module name + rel_path = os.path.relpath(os.path.join(root, file), package_path) + module_parts = rel_path.replace(os.sep, ".").replace(".py", "") + + # Handle __init__.py files + if module_parts.endswith(".__init__"): + module_parts = module_parts[:-9] # Remove .__init__ + + full_module_name = ( + f"{self.package_name}.{module_parts}" + if module_parts + else self.package_name + ) + + if not self.should_skip_module(full_module_name): + modules.append(full_module_name) + + # Remove duplicates and sort + modules = sorted(list(set(modules))) + + return modules + + def import_module(self, module_name: str) -> Tuple[str, str]: + """ + Try to import a module and return success status and error message. 
+ + Returns: + Tuple of (status: str, error_message: str) + status can be: "success", "graceful", or "failed" + """ + try: + if module_name in sys.modules: + del sys.modules[module_name] + + importlib.import_module(module_name) + return "success", "" + + except Exception: + tb = traceback.format_exc() + if "UnavailableError" in tb: + return "graceful", "UnavailableError detected during import" + return "failed", f"{str(tb)}" + + def check_all_imports(self): + """Check imports for all discovered modules.""" + print(f"Discovering modules in package '{self.package_name}'...") + modules = self.discover_modules(self.package_name) + + if not modules: + print("No modules found!") + return + + print(f"Found {len(modules)} modules to check") + print("=" * 60) + + for i, module_name in enumerate(modules, 1): + status, error_msg = self.import_module(module_name) + + if status == "success": + self.success_count += 1 + self.successes.append(module_name) + elif status == "graceful": + self.graceful_count += 1 + self.graceful_failures[module_name] = error_msg + else: # failed + self.failure_count += 1 + self.failures[module_name] = error_msg + + """Print a summary of the import check results.""" + total = ( + self.success_count + + self.failure_count + + self.graceful_count + + self.skipped_count + ) + + print("\n" + "=" * 60) + print("IMPORT CHECK SUMMARY") + print("=" * 60) + print(f"Total modules checked: {total}") + print( + f"Successful imports: {self.success_count} ({self.success_count / total * 100:.1f}%)" + ) + print( + f"Gracefully handled: {self.graceful_count} ({self.graceful_count / total * 100:.1f}%)" + ) + print( + f"Failed imports: {self.failure_count} ({self.failure_count / total * 100:.1f}%)" + ) + if self.skipped_count > 0: + print( + f"Skipped modules: {self.skipped_count} ({self.skipped_count / total * 100:.1f}%)" + ) + + if self.graceful_failures: + print(f"\n🟡 GRACEFULLY HANDLED ({len(self.graceful_failures)}):") + print("-" * 40) + + if self.failures: + print(f"\n❌ FAILED IMPORTS ({len(self.failures)}):") + print("-" * 40) + for module_name, error_msg in self.failures.items(): + print(f"\n• {module_name}") + # Show only the first few lines of error to keep output manageable + error_lines = error_msg.split("\n") + for line in error_lines: + # if self.package_name.replace(".", os.sep) not in line: + # continue + if line.strip(): + print(f" {line}") + + return self.failure_count == 0 + + +@click.command() +@click.option( + "--package-name", + required=True, + help="Package name to check imports for", +) +def main(package_name: str): + """Main entry point.""" + checker = ImportChecker(package_name=package_name) + successful = checker.check_all_imports() + exit(0 if successful else 1) + + +if __name__ == "__main__": + main() diff --git a/.gitlab/scripts/fetch-legacy-suite.sh b/.gitlab/scripts/fetch-legacy-suite.sh new file mode 100644 index 0000000000..775a0c0ddd --- /dev/null +++ b/.gitlab/scripts/fetch-legacy-suite.sh @@ -0,0 +1,70 @@ +#!/bin/bash +set -euxo pipefail + +# Default values +MCORE_REPO="https://github.com/nvidia/megatron-lm.git" +MCORE_MR_COMMIT="main" +MCORE_BACKWARDS_COMMIT="" + +# Parse command line arguments +usage() { + cat < labels + - gitlab-mr-labeler -f .gitlab/labeler-config.yml -t ${PROJECT_ACCESS_TOKEN_MCORE} --debug true + - cat labels + after_script: + - | + source labels + curl --header "PRIVATE-TOKEN: ${PROJECT_ACCESS_TOKEN_MCORE}" --url "https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests/${CI_MERGE_REQUEST_IID}" --data-urlencode 
"add_labels=$LABELS" -X PUT + +pre:maybe_cherry_pick_commit: + rules: + - if: '$CI_COMMIT_BRANCH == "main" && $CI_PIPELINE_SOURCE == "push"' + - when: never + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + stage: .pre + image: nentangso/alpine-git-curl-jq + variables: + GIT_STRATEGY: "clone" + script: + - set -x + - set +e + - SHA=$(git rev-list --no-merges -n 1 HEAD) + - MESSAGE=$(git log -n 1 --pretty=format:%s $SHA) + - MR_ID=$(echo $MESSAGE | awk -F'!' '{print $2}' | awk '{print $1}' ) + - git remote set-url origin "https://gitlab-ci-token:${PROJECT_ACCESS_TOKEN_MCORE}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git" + - git config --global user.email "mcore-bot@nvidia.com" + - git config --global user.name "Mcore Bot" + - | + MR=$(curl --header "PRIVATE-TOKEN: ${PROJECT_ACCESS_TOKEN_MCORE}" --url "https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests/${MR_ID}") + + LABELS=$(echo -E $MR | jq '.labels | join(",")' | tr -d '"') + AUTHOR_ID=$(echo -E $MR | jq '.author.id' | tr -d '"') + AUTHOR_NAME=$(echo -E $MR | jq '.author.username' | tr -d '"') + TITLE=$(echo -E $MR | jq '.title' | tr -d '"') + MILESTONE_ID=$(echo -E $MR | jq '.milestone.id' | tr -d '"') + TARGET_BRANCHES=$(echo "$LABELS" | grep -o 'core_[^,]*') + + if [[ $TARGET_BRANCHES == "" ]]; then + echo Nothing to cherry pick + exit 0 + fi + + echo $TARGET_BRANCHES | while read -r RELEASE_BRANCH ; do + TARGET_BRANCH_EXISTS_OK=$([[ "$(git ls-remote --heads origin refs/heads/$RELEASE_BRANCH)" != "" ]] && echo true || echo false) + + if [[ "$TARGET_BRANCH_EXISTS_OK" == "false" ]]; then + echo Release branch does not yet exist, will not cherry-pick + continue + fi + + ( + git fetch origin $RELEASE_BRANCH:$RELEASE_BRANCH + git switch --force-create cherry-pick-$MR_ID-$RELEASE_BRANCH $RELEASE_BRANCH + git cherry-pick $SHA + git push -u origin --force cherry-pick-$MR_ID-$RELEASE_BRANCH + git checkout main + ) + + CHERRYPICK_SUCCESSFUL=$? + + if [[ $CHERRYPICK_SUCCESSFUL -eq 0 ]]; then + curl \ + --header "PRIVATE-TOKEN: $PAT" \ + --url https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests \ + -d "source_branch=cherry-pick-$MR_ID-$RELEASE_BRANCH" \ + -d "target_branch=$RELEASE_BRANCH" \ + -d "title=Cherry pick \`$TITLE ($MR_ID)\` into \`$RELEASE_BRANCH\`" \ + -d "labels=cherry-pick" \ + -d "reviewer_ids=$AUTHOR_ID" \ + -d "milestone_id=$MILESTONE_ID" \ + -d "description=[🤖]: Hi @$AUTHOR_NAME 👋,

we've cherry picked \`$TITLE ($MR_ID)\` into \`$RELEASE_BRANCH\` for you! 🚀

Please review and approve this cherry pick by your convenience\!" + + else + URL=https://${GITLAB_ENDPOINT}/ADLR/megatron-lm/-/merge_requests/$MR_ID + + MESSAGE='{ + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "beep boop 🤖: Cherry-pick of <'$URL'|!'$MR_ID'> failed\ncc '$SLACK_ADMIN'" + } + } + ] + }' + + curl -X POST -H "Content-type: application/json" --data "$MESSAGE" ${MCORE_NOTIFICATION_HOOK} + + fi + + done + interruptible: false + +pre:check_milestone: + extends: [.pre_rules] + image: badouralix/curl-jq + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + script: + - env + - | + MILESTONE=$(curl --header "PRIVATE-TOKEN: ${PROJECT_ACCESS_TOKEN_MCORE}" --url "https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests/${CI_MERGE_REQUEST_IID}" | jq '.milestone') + - | + if [[ "$MILESTONE" == "null" ]]; then + LATEST_MILESTONE=$(curl --header "PRIVATE-TOKEN: ${PROJECT_ACCESS_TOKEN_MCORE}" --url "https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/milestones?state=active&order_by=due_date&sort=desc" | jq '.[0].id') + curl --request PUT --header "PRIVATE-TOKEN: ${PROJECT_ACCESS_TOKEN_MCORE}" --url "https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests/${CI_MERGE_REQUEST_IID}" --data "milestone_id=${LATEST_MILESTONE}" + echo "Applied latest milestone (ID: ${LATEST_MILESTONE}) to this MR" + fi + +pre:check_status_of_main: + extends: [.pre_rules] + image: python:3.10 + timeout: 7 days + variables: + KUBERNETES_SERVICE_MEMORY_REQUEST: 32Gi + KUBERNETES_SERVICE_MEMORY_LIMIT: 32Gi + KUBERNETES_SERVICE_CPU_REQUEST: 8 + KUBERNETES_SERVICE_CPU_LIMIT: 12 + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + script: + - env + - pip install --no-cache-dir python-gitlab click + - export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE} + - export GITLAB_ENDPOINT + - python tests/test_utils/python_scripts/check_status_of_main.py --target-branch "$CI_MERGE_REQUEST_TARGET_BRANCH_NAME" + rules: + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merge_train' && $CI_MERGE_REQUEST_LABELS =~ /fast-track/ + when: never + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merge_train' + when: always + - when: never diff --git a/.gitlab/stages/01.build.yml b/.gitlab/stages/01.build.yml new file mode 100644 index 0000000000..aea73a3889 --- /dev/null +++ b/.gitlab/stages/01.build.yml @@ -0,0 +1,71 @@ +.build_rules: + rules: + - when: on_success + stage: test + +.build_image: + extends: [.build_rules, .dind_rules] + stage: build + tags: + - arch/amd64 + - origin/jet-fleet + - env/prod + - ${TAG} + services: + - name: docker:24.0.5-dind + variables: + HEALTHCHECK_TCP_PORT: "2376" + timeout: 180m + variables: + DOCKER_HOST: tcp://docker:2376 + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_TLS_VERIFY: 1 + DOCKER_CERT_PATH: "$DOCKER_TLS_CERTDIR/client" + TAG: purpose/builder-large + STAGE: jet + MCORE_BACKWARDS_REF: core_r0.14.0 + KUBERNETES_SERVICE_MEMORY_REQUEST: 90Gi + KUBERNETES_SERVICE_MEMORY_LIMIT: 90Gi + SHARED_PATH: /builds/$CI_PROJECT_PATH/shared + script: + - eval PUBLISH_COMMIT=$PUBLISH_COMMIT + - apk add bash curl git + - export TE_GIT_REF=$TE_GIT_REF + - export GH_TOKEN=$GH_TOKEN + - bash .gitlab/scripts/build.sh + + - git fetch origin $MCORE_BACKWARDS_REF + - MCORE_BACKWARDS_COMMIT=$(git rev-parse FETCH_HEAD) + + - echo "MCORE_MR_COMMIT=$CI_COMMIT_SHA" | tee -a build.env + - echo "MCORE_BACKWARDS_COMMIT=$MCORE_BACKWARDS_COMMIT" | tee -a 
build.env + - cat build.env + retry: + max: 2 + artifacts: + reports: + dotenv: build.env + +test:build_image: + extends: [.build_image] + parallel: + matrix: + - IMAGE: CI_MCORE_LTS_IMAGE + FILE: Dockerfile.ci.lts + BASE_IMAGE: nvcr.io/nvidia/pytorch:24.01-py3 + - IMAGE: CI_MCORE_DEV_IMAGE + FILE: Dockerfile.ci.dev + BASE_IMAGE: nvcr.io/nvidia/pytorch:25.06-py3 + - IMAGE: UTILITY_IMAGE + FILE: Dockerfile.linting + BASE_IMAGE: python:3.10 + +test:build_nemo_image: + extends: [.build_image] + variables: + IMAGE: CI_NEMO_IMAGE + FILE: Dockerfile.ci.nemo + BASE_IMAGE: nvcr.io/nvidian/nemo:nightly + rules: + - if: $FUNCTIONAL_TEST == "yes" || $INTEGRATION_TEST == "yes" || $CI_COMMIT_BRANCH == "ci-rebuild-mcore-nemo-image" + when: on_success diff --git a/.gitlab/stages/02.test.yml b/.gitlab/stages/02.test.yml new file mode 100644 index 0000000000..72f1491b07 --- /dev/null +++ b/.gitlab/stages/02.test.yml @@ -0,0 +1,417 @@ +.test_rules: + rules: + - if: $PUBLISH == "yes" + when: never + - when: on_success + stage: test + +include: + - template: Security/Secret-Detection.gitlab-ci.yml + +wait_for_resources: + extends: [.test_rules] + needs: + - test:linting_formatting + - test:linting_copyright + - job: test:linting_secret_detection + optional: true + - test:build_image + image: python:3.10 + timeout: 7 days + variables: + KUBERNETES_SERVICE_MEMORY_REQUEST: 32Gi + KUBERNETES_SERVICE_MEMORY_LIMIT: 32Gi + KUBERNETES_SERVICE_CPU_REQUEST: 8 + KUBERNETES_SERVICE_CPU_LIMIT: 12 + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + script: + - env + - pip install --no-cache-dir python-gitlab click + - export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE} + - export GITLAB_ENDPOINT + - export NUM_CONCURRENT_JOBS + - python tests/test_utils/python_scripts/wait_for_resources.py --pipeline-id $CI_PIPELINE_ID --target-branch $CI_MERGE_REQUEST_TARGET_BRANCH_NAME + rules: + - if: $CI_MERGE_REQUEST_LABELS =~ /fast-track/ + when: never + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + when: on_success + - when: never + +test:unit_tests_configure: + extends: [.test_rules] + needs: + - test:build_image + - job: wait_for_resources + optional: true + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + before_script: + - git rm -r tests/test_utils/local_recipes || true + - git submodule add --force https://gitlab-ci-token:${CI_JOB_TOKEN}@${GITLAB_ENDPOINT}/ADLR/megatron-lm-convergence-tests.git tests/test_utils/local_recipes + - ls tests/test_utils/local_recipes + script: + - env + - set -x + - | + A100_CLUSTER=$([[ "$CLUSTER_A100" != "" ]] && echo $CLUSTER_A100 || echo $DEFAULT_A100_CLUSTER) + H100_CLUSTER=$([[ "$CLUSTER_H100" != "" ]] && echo $CLUSTER_H100 || echo $DEFAULT_H100_CLUSTER) + - | + ARGS=( + "--scope unit-tests" + "--n-repeat ${UNIT_TEST_REPEAT}" + "--time-limit $(( UNIT_TEST_TIMEOUT * 60 ))" + "--test-cases all" + "--cluster dgxh100_coreweave" + "--platform dgx_h100" + "--partition batch_short,batch" + "--container-image ${UTILITY_IMAGE}" + "--container-tag ${CI_PIPELINE_ID}" + "--dependent-job test:unit_tests_configure" + "--slurm-account ${CI_SLURM_ACCOUNT}" + "--no-enable-warmup" + ) + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment "lts" \ + --tag "legacy" \ + --output-path "unit-test-job-lts-legacy.yaml" + - | + export PYTHONPATH=$(pwd) + python 
tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment "lts" \ + --tag "latest" \ + --output-path "unit-test-job-lts-latest.yaml" + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment "dev" \ + --tag "legacy" \ + --output-path "unit-test-job-dev-legacy.yaml" + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment "dev" \ + --tag "latest" \ + --output-path "unit-test-job-dev-latest.yaml" + rules: + - if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true" + allow_failure: true + when: on_success + - if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0' + when: on_success + artifacts: + paths: + - unit-test-job-dev-legacy.yaml + - unit-test-job-dev-latest.yaml + - unit-test-job-lts-legacy.yaml + - unit-test-job-lts-latest.yaml + - tests/test_utils/local_recipes + +.unit_tests_run: + needs: + - test:linting_formatting + - test:linting_copyright + - job: test:linting_secret_detection + optional: true + - test:unit_tests_configure + - test:build_image + extends: [.test_rules] + trigger: + include: + - artifact: unit-test-job-$ENVIRONMENT-$TAG.yaml + job: test:unit_tests_configure + strategy: depend + variables: + RO_API_TOKEN: $PAT + CONTAINER_TAG: $CI_PIPELINE_ID + CI_MCORE_LTS_IMAGE: $CI_MCORE_LTS_IMAGE + GITLAB_ENDPOINT: $GITLAB_ENDPOINT + PARENT_PIPELINE_ID: $CI_PIPELINE_ID + MCORE_MR_COMMIT: $MCORE_MR_COMMIT + MCORE_BACKWARDS_COMMIT: $MCORE_BACKWARDS_COMMIT + + inherit: + variables: true + rules: + - if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true" + allow_failure: true + when: on_success + - if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0' + when: on_success + +test:unit_tests_pyt(DEV)_mcore(legacy): + extends: [.unit_tests_run] + variables: + ENVIRONMENT: dev + TAG: legacy + rules: + - if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME =~ /^core_r/ + when: never + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != 'main' + when: never + - if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true" + allow_failure: true + when: on_success + - if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0' + when: on_success + +test:unit_tests_pyt(LTS)_mcore(legacy): + extends: [.unit_tests_run] + variables: + ENVIRONMENT: lts + TAG: legacy + rules: + - if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME =~ /^core_r/ + when: never + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != 'main' + when: never + - if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true" + allow_failure: true + when: on_success + - if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0' + when: on_success + +test:unit_tests_pyt(DEV)_mcore(latest): + extends: [.unit_tests_run] + variables: + ENVIRONMENT: dev + TAG: latest + +test:unit_tests_pyt(LTS)_mcore(latest): + extends: [.unit_tests_run] + variables: + ENVIRONMENT: lts + TAG: latest + +test:unit_tests_notify: + extends: [.test_rules] + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + needs: + - test:unit_tests_pyt(DEV)_mcore(latest) + - test:unit_tests_pyt(LTS)_mcore(latest) + tags: + - arch/amd64 + - env/prod + - 
origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + script: + - env + - | + if [[ "$CI_COMMIT_BRANCH" == "dev" ]]; then + export WEBHOOK_URL=${MCORE_NOTIFICATION_HOOK_DEV} + else + export WEBHOOK_URL=${MCORE_NOTIFICATION_HOOK} + fi + - export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE} + - export GITLAB_ENDPOINT + - export TAG_TEAM=$([[ "$CI_COMMIT_BRANCH" == "main" ]] && echo "1" || "0") + - export TEAM_SLUG=$SLACK_ADMIN + - | + python tests/test_utils/python_scripts/notify.py \ + --pipeline-id "${CI_PIPELINE_ID}" \ + --check-for unit-tests \ + --pipeline-context "unit-tests-extended" \ + --pipeline-created-at "${CI_PIPELINE_CREATED_AT}" + artifacts: + when: always + paths: + - scripts + rules: + - if: $CI_PIPELINE_SOURCE == "schedule" && ($CI_COMMIT_BRANCH == "ci-unit-test-extended" || "ci-dev-unit-test-extended") + when: always + - when: never + +test:linting_docs_build: + extends: [.test_rules] + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + needs: [test:build_image] + script: + - cd .. + - rm -rf documentation && git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@${GITLAB_ENDPOINT}/nemo-megatron-core-tme/documentation.git + - mv megatron-lm/ documentation/ + - cd documentation/ + - ./repo docs + +test:linting_formatting: + extends: [.test_rules] + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + needs: [test:build_image] + variables: + GIT_STRATEGY: "clone" + script: + - | + if [[ "$CI_PIPELINE_SOURCE" != "merge_request_event" ]]; then + exit 0 + fi + - set +e + - git fetch origin main:main + - | + if [[ "$CI_MERGE_REQUEST_PROJECT_PATH" == "$CI_MERGE_REQUEST_SOURCE_PROJECT_PATH" ]]; then + bash tools/autoformat.sh + set -e + git fetch origin $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME + git checkout $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME + git config --global user.email "mcore-bot@nvidia.com" + git config --global user.name "Mcore Bot" + git remote set-url origin "https://gitlab-ci-token:${PAT}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git" + git add -A . 
+ git commit -m "chore: Format files" || true + git push -u origin $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME + fi + - env + - BASE_REF="$CI_MERGE_REQUEST_TARGET_BRANCH_NAME" CHECK_ONLY=true SKIP_DOCS=$([[ "$CI_MERGE_REQUEST_LABELS" == *"Skip docs"* ]] && echo "true" || echo "false") bash tools/autoformat.sh + +test:linting_copyright: + extends: [.test_rules] + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + needs: [test:build_image] + script: + - git fetch origin main + - bash tools/copyright.sh + +# Override from template +secret_detection: + rules: + - when: never + +# Inherit and modify template +test:linting_secret_detection: + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + extends: [".secret-analyzer"] + needs: [test:build_image] + variables: + GIT_DEPTH: 0 + SECRET_DETECTION_LOG_OPTIONS: ${CI_MERGE_REQUEST_DIFF_BASE_SHA}..${CI_COMMIT_SHA} + allow_failure: false + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + - when: never + script: + - apk add jq + - /analyzer run + - | + if [[ $(cat gl-secret-detection-report.json | jq '.vulnerabilities | length > 0') == true ]]; then + echo "Atleast one vulnerability has been found" + cat gl-secret-detection-report.json | jq '.' + exit 1 + fi + +test:unit_tests_x_coverage_report: + extends: [.test_rules] + needs: + - job: test:unit_tests_pyt(DEV)_mcore(latest) + - job: test:unit_tests_pyt(LTS)_mcore(latest) + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + script: + - env + - export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE} + - export GITLAB_ENDPOINT + - python tests/test_utils/python_scripts/download_coverage_results.py --pipeline-id ${CI_PIPELINE_ID} + - coverage combine --keep $(ls coverage_results/*/coverage_report) + - coverage report + - coverage xml + coverage: "/TOTAL.+ ([0-9]{1,3}%)/" + artifacts: + reports: + coverage_report: + coverage_format: cobertura + path: coverage.xml + rules: + - if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true" + allow_failure: true + when: on_success + - if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0' + when: on_success + +test:safe_imports: + extends: [.test_rules] + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/builder-large + - team/megatron + services: + - name: docker:24.0.5-dind + variables: + HEALTHCHECK_TCP_PORT: "2376" + variables: + KUBERNETES_SERVICE_MEMORY_REQUEST: 32Gi + KUBERNETES_SERVICE_MEMORY_LIMIT: 32Gi + KUBERNETES_SERVICE_CPU_REQUEST: 8 + KUBERNETES_SERVICE_CPU_LIMIT: 12 + image: + name: python:3.11 + entrypoint: [""] + needs: [test:build_image] + script: + - env + - python -m ensurepip --upgrade + - python -m pip install --no-cache-dir -e . 
+ - python -m pip install --no-cache-dir click + - python .gitlab/scripts/check_imports.py --package-name megatron.core + rules: + - if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != 'main' + when: never + - if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true" + allow_failure: true + when: on_success + - if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0' + when: on_success + retry: + max: 2 diff --git a/.gitlab/stages/03.integration-tests.yml b/.gitlab/stages/03.integration-tests.yml new file mode 100644 index 0000000000..df4d84234b --- /dev/null +++ b/.gitlab/stages/03.integration-tests.yml @@ -0,0 +1,143 @@ +.integration_tests_rules: + stage: integration_tests + rules: + - if: $INTEGRATION_TEST == "yes" + when: on_success + - when: never + +default: + id_tokens: + VAULT_JWT_TOKEN: + aud: https://stg.vault.nvidia.com + +include: + - project: dl/jet/gitlab-templates + ref: main + file: downstreams.yml + +integration:configure: + needs: + - test:build_image + - job: test:unit_tests_pyt(DEV)_mcore(latest) + optional: true + - job: test:unit_tests_pyt(LTS)_mcore(latest) + optional: true + - job: test:build_nemo_image + extends: [.integration_tests_rules] + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + before_script: + - git rm -r tests/test_utils/local_recipes || true + - git submodule add --force https://gitlab-ci-token:${CI_JOB_TOKEN}@${GITLAB_ENDPOINT}/ADLR/megatron-lm-convergence-tests.git tests/test_utils/local_recipes + - ls tests/test_utils/local_recipes + script: + - set -x + - | + A100_CLUSTER=$([[ "$CLUSTER_A100" != "" ]] && echo $CLUSTER_A100 || echo $DEFAULT_A100_CLUSTER) + H100_CLUSTER=$([[ "$CLUSTER_H100" != "" ]] && echo $CLUSTER_H100 || echo $DEFAULT_H100_CLUSTER) + - | + ARGS=( + "--scope $INTEGRATION_TEST_SCOPE" + "--n-repeat 1" + "--time-limit $INTEGRATION_TEST_TIME_LIMIT" + "--test-cases $INTEGRATION_TEST_CASES" + "--container-image ${UTILITY_IMAGE}" + "--container-tag ${CI_PIPELINE_ID}" + "--slurm-account ${CI_SLURM_ACCOUNT}" + "--no-enable-warmup" + "--dependent-job integration:configure" + "--enable-lightweight-mode" + ) + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment dev \ + --platform dgx_a100 \ + --cluster $A100_CLUSTER \ + --output-path "functional-test-job-dev-A100.yaml" + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment dev \ + --platform dgx_h100 \ + --cluster $H100_CLUSTER \ + --output-path "functional-test-job-dev-H100.yaml" + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment lts \ + --platform dgx_a100 \ + --cluster $A100_CLUSTER \ + --output-path "functional-test-job-lts-A100.yaml" + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment lts \ + --platform dgx_h100 \ + --cluster $H100_CLUSTER \ + --output-path "functional-test-job-lts-H100.yaml" + artifacts: + paths: + - functional-test-job-lts-A100.yaml + - functional-test-job-lts-H100.yaml + - functional-test-job-dev-H100.yaml + - functional-test-job-dev-A100.yaml + - tests/test_utils/local_recipes + +.integration_run: + needs: + - 
integration:configure + - test:build_image + - job: wait_for_resources + optional: true + extends: [.integration_tests_rules] + trigger: + include: + - artifact: functional-test-job-$ENVIRONMENT-$CLUSTER.yaml + job: integration:configure + strategy: depend + variables: + RO_API_TOKEN: $PAT + CONTAINER_TAG: $CI_PIPELINE_ID + CI_MCORE_LTS_IMAGE: $CI_MCORE_LTS_IMAGE + GITLAB_ENDPOINT: $GITLAB_ENDPOINT + PARENT_PIPELINE_ID: $CI_PIPELINE_ID + DASHBOARD_ENDPOINT: $DASHBOARD_ENDPOINT + MCORE_MR_COMMIT: $MCORE_MR_COMMIT + MCORE_BACKWARDS_COMMIT: $MCORE_BACKWARDS_COMMIT + inherit: + variables: true + +integration:run_lts_dgx_a100: + extends: [.integration_run] + variables: + ENVIRONMENT: lts + CLUSTER: A100 + +integration:run_lts_dgx_h100: + extends: [.integration_run] + variables: + ENVIRONMENT: lts + CLUSTER: H100 + +integration:run_dev_dgx_a100: + extends: [.integration_run] + variables: + ENVIRONMENT: dev + CLUSTER: A100 + +integration:run_dev_dgx_h100: + extends: [.integration_run] + variables: + ENVIRONMENT: dev + CLUSTER: H100 diff --git a/.gitlab/stages/04.functional-tests.yml b/.gitlab/stages/04.functional-tests.yml new file mode 100644 index 0000000000..084787e8ec --- /dev/null +++ b/.gitlab/stages/04.functional-tests.yml @@ -0,0 +1,258 @@ +.functional_tests_rules: + stage: functional_tests + rules: + - if: $FUNCTIONAL_TEST == "yes" + when: on_success + - when: never +default: + id_tokens: + VAULT_JWT_TOKEN: + aud: https://stg.vault.nvidia.com + +include: + - project: dl/jet/gitlab-templates + ref: main + file: downstreams.yml + +functional:configure: + needs: + - test:build_image + - test:build_nemo_image + - job: test:unit_tests_pyt(DEV)_mcore(latest) + optional: true + - job: test:unit_tests_pyt(LTS)_mcore(latest) + optional: true + - job: integration:run_lts_dgx_a100 + optional: true + - job: integration:run_dev_dgx_a100 + optional: true + - job: integration:run_lts_dgx_h100 + optional: true + - job: integration:run_dev_dgx_h100 + optional: true + extends: [.functional_tests_rules] + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + before_script: + - git rm -r tests/test_utils/local_recipes || true + - git submodule add --force https://gitlab-ci-token:${CI_JOB_TOKEN}@${GITLAB_ENDPOINT}/ADLR/megatron-lm-convergence-tests.git tests/test_utils/local_recipes + - ls tests/test_utils/local_recipes + script: + - set -x + - | + A100_CLUSTER=$([[ "$CLUSTER_A100" != "" ]] && echo $CLUSTER_A100 || echo $DEFAULT_A100_CLUSTER) + H100_CLUSTER=$([[ "$CLUSTER_H100" != "" ]] && echo $CLUSTER_H100 || echo $DEFAULT_H100_CLUSTER) + - | + RECORD_CHECKPOINTS=$([[ "$CI_MERGE_REQUEST_LABELS" == *"Record checkpoints"* || "$FUNCTIONAL_TEST_RECORD_CHECKPOINTS" == "yes" ]] && echo "true" || echo "false") + - | + if [[ "$FUNCTIONAL_TEST_SCOPE" == "release" || "$FUNCTIONAL_TEST_SCOPE" == "pre-release" ]]; then + FUNCTIONAL_TEST_NAME=$(eval echo $FUNCTIONAL_TEST_NAME) + RELEASE_ARGS=( + "--run-name" + $FUNCTIONAL_TEST_NAME + "--wandb-experiment" + $(echo $FUNCTIONAL_TEST_NAME | tr '/' '-') + ) + else + RELEASE_ARGS=() + fi + - | + ARGS=( + "--scope $FUNCTIONAL_TEST_SCOPE" + "--n-repeat $FUNCTIONAL_TEST_REPEAT" + "--time-limit $FUNCTIONAL_TEST_TIME_LIMIT" + "--test-cases $FUNCTIONAL_TEST_CASES" + "--container-image ${UTILITY_IMAGE}" + "--container-tag ${CI_PIPELINE_ID}" + "--dependent-job functional:configure" + "--record-checkpoints ${RECORD_CHECKPOINTS}" + "--slurm-account ${CI_SLURM_ACCOUNT}" + 
"--no-enable-warmup" + ) + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment dev \ + --platform dgx_a100 \ + --cluster $A100_CLUSTER \ + --output-path "functional-test-job-dev-A100.yaml" \ + ${RELEASE_ARGS[@]} + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment dev \ + --platform dgx_h100 \ + --cluster $H100_CLUSTER \ + --output-path "functional-test-job-dev-H100.yaml" \ + ${RELEASE_ARGS[@]} + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment lts \ + --platform dgx_a100 \ + --cluster $A100_CLUSTER \ + --output-path "functional-test-job-lts-A100.yaml" \ + ${RELEASE_ARGS[@]} + - | + export PYTHONPATH=$(pwd) + python tests/test_utils/python_scripts/generate_jet_trigger_job.py \ + ${ARGS[@]} \ + --environment lts \ + --platform dgx_h100 \ + --cluster $H100_CLUSTER \ + --output-path "functional-test-job-lts-H100.yaml" \ + ${RELEASE_ARGS[@]} + artifacts: + paths: + - functional-test-job-lts-A100.yaml + - functional-test-job-lts-H100.yaml + - functional-test-job-dev-A100.yaml + - functional-test-job-dev-H100.yaml + - tests/test_utils/local_recipes + +.functional_run: + needs: + - functional:configure + - test:build_image + extends: [.functional_tests_rules] + trigger: + include: + - artifact: functional-test-job-$ENVIRONMENT-$CLUSTER.yaml + job: functional:configure + strategy: depend + variables: + RO_API_TOKEN: $PAT + CONTAINER_TAG: $CI_PIPELINE_ID + CI_MCORE_LTS_IMAGE: $CI_MCORE_LTS_IMAGE + GITLAB_ENDPOINT: $GITLAB_ENDPOINT + PARENT_PIPELINE_ID: $CI_PIPELINE_ID + DASHBOARD_ENDPOINT: $DASHBOARD_ENDPOINT + MCORE_MR_COMMIT: $MCORE_MR_COMMIT + MCORE_BACKWARDS_COMMIT: $MCORE_BACKWARDS_COMMIT + CLUSTER: $CLUSTER + + inherit: + variables: true + +functional:run_lts_dgx_a100: + extends: [.functional_run] + variables: + ENVIRONMENT: lts + CLUSTER: A100 + +functional:run_lts_dgx_h100: + extends: [.functional_run] + variables: + ENVIRONMENT: lts + CLUSTER: H100 + +functional:run_dev_dgx_a100: + extends: [.functional_run] + variables: + ENVIRONMENT: dev + CLUSTER: A100 + +functional:run_dev_dgx_h100: + extends: [.functional_run] + variables: + ENVIRONMENT: dev + CLUSTER: H100 + +functional:run_nemo: + extends: [.functional_tests_rules] + trigger: + project: "dl/joc/nemo-ci" + branch: main-mirror + strategy: depend + inherit: + variables: true + variables: + MCORE_COMMIT: $CI_COMMIT_SHA + TEST_NEMO2_MODULE: "True" + ALLOW_FAILURE_DEPENDENCY: "True" + TESTS_TO_RUN_ON_THIS_COMMIT: nightly + rules: + - if: $FUNCTIONAL_TEST == "yes" + when: manual + allow_failure: true + - when: never + +functional:x_notify: + extends: [.functional_tests_rules] + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + needs: + - functional:run_lts_dgx_a100 + - functional:run_dev_dgx_a100 + - functional:run_lts_dgx_h100 + - functional:run_dev_dgx_h100 + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + variables: + RO_API_TOKEN: ${PROJECT_ACCESS_TOKEN_MCORE} + CONTEXT: $FUNCTIONAL_TEST_SCOPE + script: + - env + - | + if [[ "$CI_COMMIT_BRANCH" == "dev" ]]; then + export WEBHOOK_URL=${MCORE_NOTIFICATION_HOOK_DEV} + else + export WEBHOOK_URL=${MCORE_NOTIFICATION_HOOK} + fi + - export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE} + - export GITLAB_ENDPOINT + - export CONTEXT=$FUNCTIONAL_TEST_SCOPE + - export TAG_TEAM=$([[ 
"$CI_COMMIT_BRANCH" == "main" ]] && echo "1" || "0") + - export TEAM_SLUG=$SLACK_ADMIN + - | + python tests/test_utils/python_scripts/notify.py \ + --pipeline-id "${CI_PIPELINE_ID}" \ + --check-for functional-tests \ + --pipeline-context $CONTEXT \ + --pipeline-created-at "${CI_PIPELINE_CREATED_AT}" + + artifacts: + when: always + paths: + - scripts + rules: + - if: ($CI_PIPELINE_SOURCE == "schedule" || $CI_COMMIT_BRANCH == "main") && $FUNCTIONAL_TEST == "yes" + when: always + - when: never + +functional:x_download_golden_values: + extends: [.functional_tests_rules] + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + script: + - env + - export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE} + - export GITLAB_ENDPOINT + - python tests/test_utils/python_scripts/download_golden_values.py --pipeline-id ${CI_PIPELINE_ID} + artifacts: + paths: + - tests/ + rules: + - if: $FUNCTIONAL_TEST == "yes" + when: manual + allow_failure: true + - when: never diff --git a/.gitlab/stages/05.publish.yml b/.gitlab/stages/05.publish.yml new file mode 100644 index 0000000000..3a34386b0a --- /dev/null +++ b/.gitlab/stages/05.publish.yml @@ -0,0 +1,679 @@ +.publish_common_freeze: + stage: publish + rules: + - if: ($CI_COMMIT_BRANCH == "main") && $PUBLISH == "yes" && $PUBLISH_SCOPE == "code-freeze" + when: manual + - when: never + +.publish_common_release: + stage: publish + rules: + - if: $CI_PIPELINE_SOURCE == "web" && $PUBLISH == "yes" && $PUBLISH_SCOPE == "release" + when: manual + - if: $PUBLISH == "yes" && $PUBLISH_SCOPE == "release" + when: on_success + - when: never + +publish:test_release_pypi_build_wheel: + extends: [.test_rules] + stage: publish + image: + name: ${IMAGE} + entrypoint: [""] + services: + - name: docker:24.0.5-dind + variables: + HEALTHCHECK_TCP_PORT: "2376" + needs: [test:build_image] + parallel: + matrix: + - PACKAGE: megatron-core + PLATFORM: arm64 + IMAGE: quay.io/pypa/manylinux_2_28_aarch64 + - PACKAGE: megatron-core + PLATFORM: amd64 + IMAGE: quay.io/pypa/manylinux_2_28_x86_64 + - PACKAGE: megatron-fsdp + IMAGE: quay.io/pypa/manylinux_2_28_x86_64 + PLATFORM: amd64 + tags: + - arch/${PLATFORM} + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/builder-small + - team/megatron + variables: + PY_ENV: pytorch_25.03 + KUBERNETES_SERVICE_MEMORY_REQUEST: 16Gi + KUBERNETES_SERVICE_MEMORY_LIMIT: 16Gi + PUBLISH_DRYRUN: "yes" + KUBERNETES_SERVICE_CPU_REQUEST: 4 + KUBERNETES_SERVICE_CPU_LIMIT: 8 + before_script: + - env + - eval PUBLISH_COMMIT=$PUBLISH_COMMIT + - env + - git fetch origin $PUBLISH_COMMIT + - git checkout $PUBLISH_COMMIT + script: + - set -x + - echo $PUBLISH_DRYRUN + - | + if [ "$PACKAGE" = "megatron-core" ]; then + ROOTDIR="megatron/core" + BUILD_DIR="." 
+ elif [ "$PACKAGE" = "megatron-fsdp" ]; then + ROOTDIR="megatron/core/distributed/fsdp/src/megatron_fsdp" + BUILD_DIR="megatron/core/distributed/fsdp/src" + else + echo Unknown package: $PACKAGE + exit 1 + fi + - | + if [ "$PUBLISH_DRYRUN" = "yes" ]; then + PRE_RELEASE=$(sed -n "s/.*PRE_RELEASE = '\(.*\)'/\1/p" $ROOTDIR/package_info.py) + sed -i "/^PRE_RELEASE/c\PRE_RELEASE = '${PRE_RELEASE}.dev$((RANDOM % 900000 + 100000))'" $ROOTDIR/package_info.py + fi + + - pushd $BUILD_DIR + - /opt/python/cp310-cp310/bin/python -m build + - /opt/python/cp311-cp311/bin/python -m build + - /opt/python/cp312-cp312/bin/python -m build + - /opt/python/cp313-cp313/bin/python -m build + - USE_DIST_DIR=0 + - auditwheel repair dist/*.whl || USE_DIST_DIR=1 + - | + if [ "$USE_DIST_DIR" != "1" ]; then + rm -rf dist/*.whl + fi + - popd + - pushd $ROOTDIR + - EXPECTED_RELEASE_NUMBER=$(/opt/python/cp312-cp312/bin/python -c "import package_info; print(package_info.__version__)") + - popd + - echo "EXPECTED_RELEASE_NUMBER_${PLATFORM}_${PACKAGE//-/_}=$EXPECTED_RELEASE_NUMBER" | tee -a build.env + - mkdir -p wheelhouse/ + - | + if [ "$PACKAGE" = "megatron-fsdp" ]; then + cp -a megatron/core/distributed/fsdp/src/dist/* wheelhouse/ + fi + - | + ls -al wheelhouse/ || true + ls -al dist/ || true + artifacts: + paths: + - megatron/core/package_info.py + - wheelhouse/ + - dist/ + - megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py + reports: + dotenv: build.env + retry: + max: 2 + +publish:test_release_pypi_test_wheel: + extends: [.test_rules] + stage: publish + image: + name: python:3.11 + entrypoint: [""] + needs: + - job: publish:test_release_pypi_build_wheel + optional: true + parallel: + matrix: + - PACKAGE: megatron-core + PLATFORM: arm64 + - PACKAGE: megatron-core + PLATFORM: amd64 + - PACKAGE: megatron-fsdp + PLATFORM: amd64 + services: + - name: docker:24.0.5-dind + variables: + HEALTHCHECK_TCP_PORT: "2376" + tags: + - arch/${PLATFORM} + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/builder-small + - team/megatron + variables: + KUBERNETES_SERVICE_MEMORY_REQUEST: 16Gi + KUBERNETES_SERVICE_MEMORY_LIMIT: 16Gi + KUBERNETES_SERVICE_CPU_REQUEST: 4 + KUBERNETES_SERVICE_CPU_LIMIT: 8 + GIT_STRATEGY: none + PUBLISH_DRYRUN: "yes" + script: + - set -x + - env + - rm -rf megatron + - pip install -U --no-cache-dir pip + - | + if [ "$PACKAGE" = "megatron-core" ]; then + ROOTPATH="megatron.core" + WHEEL_PREFIX="megatron_core" + elif [ "$PACKAGE" = "megatron-fsdp" ]; then + ROOTPATH="megatron_fsdp" + WHEEL_PREFIX="megatron_fsdp" + else + echo Unknown package: $PACKAGE + exit 1 + fi + - | + ls -al wheelhouse/ || true + ls -al dist/ || true + ls -al megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py || true + ls -al megatron/core/distributed/fsdp/src/wheelhouse/ || true + ls -al megatron/core/distributed/fsdp/src/dist/ || true + - | + if [ "$PACKAGE" = "megatron-core" ]; then + if [[ "$PLATFORM" == "arm64" ]]; then + for file in wheelhouse/$WHEEL_PREFIX*cp311*aarch64.whl; do + pip install --no-cache-dir "$file" + done + else + for file in wheelhouse/$WHEEL_PREFIX*cp311*x86_64.whl; do + pip install --no-cache-dir "$file" + done + fi + else + pip install --no-cache-dir wheelhouse/$WHEEL_PREFIX*.whl + fi + - RELEASE_NUMBER=$(python -c "import $ROOTPATH; print($ROOTPATH.__version__)") + - | + var=EXPECTED_RELEASE_NUMBER_${PLATFORM}_${PACKAGE//-/_} + EXPECTED_RELEASE_NUMBER=$(echo "${!var}") + test "$EXPECTED_RELEASE_NUMBER" == "$RELEASE_NUMBER" + - echo "$var=$RELEASE_NUMBER" | tee -a 
build.env + artifacts: + reports: + dotenv: build.env + paths: + - megatron/core/package_info.py + - wheelhouse/ + - dist/ + - megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py + retry: + max: 2 + +publish:test_release_version_bump: + needs: [publish:test_release_pypi_test_wheel] + extends: [.test_rules] + image: nentangso/alpine-git-curl-jq + stage: publish + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + before_script: + - eval PUBLISH_COMMIT=$PUBLISH_COMMIT + - eval PUBLISH_VERSION_BUMP_BRANCH=$PUBLISH_VERSION_BUMP_BRANCH + - git fetch origin $PUBLISH_COMMIT + - git checkout $PUBLISH_COMMIT + variables: + PUBLISH_DRYRUN: "yes" + script: + - set -x + - env + - echo $PUBLISH_DRYRUN + + - ROOTDIR="megatron/core" + - MAJOR=$(cat $ROOTDIR/package_info.py | awk '/^MAJOR = /' | awk -F"= " '{print $2}') + - MINOR=$(cat $ROOTDIR/package_info.py | awk '/^MINOR = /' | awk -F"= " '{print $2}') + - PATCH=$(cat $ROOTDIR/package_info.py | awk '/^PATCH = /' | awk -F"= " '{print $2}') + - PRERELEASE=$(cat $ROOTDIR/package_info.py | awk '/^PRE_RELEASE = /' | awk -F"= " '{print $2}' | tr -d '"' | tr -d "'") + - | + if [[ "$PRERELEASE" != "" ]]; then + NEXT_PATCH=$PATCH + NEXT_PRERELEASE=rc$((${PRERELEASE#rc} + 1)) + else + NEXT_PATCH=$((${PATCH} + 1)) + NEXT_PRERELEASE=$NEXT_PRERELEASE + fi + - sed -i "/^PATCH/c\PATCH = $NEXT_PATCH" $ROOTDIR/package_info.py + - sed -i "/^PRE_RELEASE/c\PRE_RELEASE = '$NEXT_PRERELEASE'" $ROOTDIR/package_info.py + + - ROOTDIR="megatron/core/distributed/fsdp/src/megatron_fsdp" + - MAJOR=$(cat $ROOTDIR/package_info.py | awk '/^MAJOR = /' | awk -F"= " '{print $2}') + - MINOR=$(cat $ROOTDIR/package_info.py | awk '/^MINOR = /' | awk -F"= " '{print $2}') + - PATCH=$(cat $ROOTDIR/package_info.py | awk '/^PATCH = /' | awk -F"= " '{print $2}') + - PRERELEASE=$(cat $ROOTDIR/package_info.py | awk '/^PRE_RELEASE = /' | awk -F"= " '{print $2}' | tr -d '"' | tr -d "'") + - | + if [[ "$PRERELEASE" != "" ]]; then + NEXT_PATCH=$PATCH + NEXT_PRERELEASE=rc$((${PRERELEASE#rc} + 1)) + else + NEXT_PATCH=$((${PATCH} + 1)) + NEXT_PRERELEASE=$NEXT_PRERELEASE + fi + - sed -i "/^PATCH/c\PATCH = $NEXT_PATCH" $ROOTDIR/package_info.py + - sed -i "/^PRE_RELEASE/c\PRE_RELEASE = '$NEXT_PRERELEASE'" $ROOTDIR/package_info.py + + - git config --global user.email "mcore-bot@nvidia.com" + - git config --global user.name "Mcore Bot" + - git remote set-url origin "https://gitlab-ci-token:${PROJECT_ACCESS_TOKEN_MCORE}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git" + - | + CMD=$( + cat <<'EOF' + git fetch origin $PUBLISH_VERSION_BUMP_BRANCH && \ + git switch $PUBLISH_VERSION_BUMP_BRANCH && \ + git add megatron/core/package_info.py && \ + git add megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py && \ + git commit -m "chore: Version bump" && \ + git push origin $PUBLISH_VERSION_BUMP_BRANCH + EOF + ) + + - | + if [[ "$PUBLISH_DRYRUN" == "yes" ]]; then + echo "$CMD" + else + eval "$CMD" + fi + +publish:test_release_pypi_push_wheel: + extends: [.test_rules] + image: python:3.11 + stage: publish + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + needs: + - job: publish:test_release_pypi_test_wheel + optional: true + - job: publish:test_release_version_bump + optional: true + variables: + GIT_STRATEGY: none + PUBLISH_DRYRUN: "yes" + timeout: 3m + script: + - set -x + - echo $PUBLISH_DRYRUN + - | + if [ "$PUBLISH_DRYRUN" = "yes" ]; then + 
REPOSITORY=testpypi + export TWINE_USERNAME=$TWINE_TEST_USERNAME + export TWINE_PASSWORT=$TWINE_TEST_PASSWORD + else + REPOSITORY=pypi + export TWINE_USERNAME=$TWINE_PROD_USERNAME + export TWINE_PASSWORT=$TWINE_PROD_PASSWORD + fi + + - ls -al dist/ + - ls -al wheelhouse/ + - pip install twine + + - | + CMD=$(echo -E "twine upload --verbose -u $TWINE_USERNAME -p $TWINE_PASSWORT --repository $REPOSITORY wheelhouse/* dist/*") + if [[ "$PUBLISH_DRYRUN" != "yes" ]]; then + eval "$CMD" + fi + +publish:test_release_github: + extends: [.test_rules] + needs: + - job: publish:test_release_pypi_test_wheel + optional: true + - job: publish:test_release_version_bump + optional: true + stage: publish + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + image: nentangso/alpine-git-curl-jq + before_script: + - eval PUBLISH_COMMIT=$PUBLISH_COMMIT + - git fetch origin $PUBLISH_COMMIT + - git checkout $PUBLISH_COMMIT + variables: + PUBLISH_DRYRUN: "yes" + script: + - set -x + - env + - apk add --no-cache bash + - | + bash <<'EOF' + set -x + echo $PUBLISH_DRYRUN + PLATFORM=amd64 + PACKAGE=megatron-core + var=EXPECTED_RELEASE_NUMBER_${PLATFORM}_${PACKAGE//-/_} + RELEASE_NUMBER=$(echo "${!var}") + NAME="NVIDIA Megatron Core $RELEASE_NUMBER" + IS_PRERELEASE=$([[ "$RELEASE_NUMBER" == *rc* ]] && echo "true" || echo "false") + + if [[ "$IS_PRERELEASE" == "true" ]]; then + DATE=$(date +"%Y-%m-%d") + CHANGELOG="Prerelease: $NAME ($DATE)" + else + CHANGELOG=$(awk '/^## '"$NAME"'/{flag=1; next} /^## /{flag=0} flag' CHANGELOG.md) + CHANGELOG=$(echo "$CHANGELOG" | sed '/./!d') + fi + + + PAYLOAD=$(jq -nc \ + --arg TAG_NAME "core_v${RELEASE_NUMBER}" \ + --arg CI_COMMIT_SHA "$PUBLISH_COMMIT" \ + --arg NAME "$NAME" \ + --arg BODY "$CHANGELOG" \ + --argjson PRERELEASE "$IS_PRERELEASE" \ + '{ + "tag_name": $TAG_NAME, + "target_commitish": $CI_COMMIT_SHA, + "name": $NAME, + "body": $BODY, + "draft": false, + "prerelease": $PRERELEASE, + "generate_release_notes": false + }' + ) + echo -E "$PAYLOAD" | tee -a payload.txt + + cat payload.txt + + CMD=$(echo -E 'curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer '"$GH_TOKEN"'" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/NVIDIA/Megatron-LM/releases \ + -d @payload.txt + ') + + if [[ "$PUBLISH_DRYRUN" == "yes" ]]; then + echo -E "$CMD" + else + eval "$CMD" + fi + EOF + +publish:test_release_notify: + needs: [publish:test_release_pypi_test_wheel, publish:test_release_pypi_push_wheel, publish:test_release_github] + extends: [.test_rules] + image: badouralix/curl-jq + stage: publish + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + variables: + PUBLISH_DRYRUN: "yes" + script: + - set -x + - env + - apk add --no-cache bash + - | + bash <<'EOF' + set -x + echo $PUBLISH_DRYRUN + PLATFORM=amd64 + PACKAGE=megatron-core + var=EXPECTED_RELEASE_NUMBER_${PLATFORM}_${PACKAGE//-/_} + RELEASE_NUMBER=$(echo "${!var}") + URL="https://github.com/NVIDIA/Megatron-LM/releases/tag/core_v$RELEASE_NUMBER" + + cat < message.json + { + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "Releasebot 🤖: Megatron-Core released <${URL}|core_v${RELEASE_NUMBER}> 🚀" + } + } + ] + } + MSG + + cat message.json + + CMD=$(echo curl \ + -X POST \ + -H "Content-type: application/json" \ + -d @message.json ${MCORE_NOTIFICATION_HOOK_MAIN} + ) + + if [[ "$PUBLISH_DRYRUN" == "yes" ]]; then + echo 
"$CMD" + else + eval "$CMD" + fi + EOF + +publish:release_pypi_build_wheel: + extends: [publish:test_release_pypi_build_wheel, .publish_common_release] + dependencies: [] + variables: + PUBLISH_DRYRUN: "no" + +publish:release_pypi_test_wheel: + extends: [publish:test_release_pypi_test_wheel, .publish_common_release] + needs: [publish:release_pypi_build_wheel] + variables: + PUBLISH_DRYRUN: "no" + +publish:release_version_bump: + needs: [publish:release_pypi_test_wheel] + extends: [publish:test_release_version_bump, .publish_common_release] + variables: + PUBLISH_DRYRUN: "no" + +publish:release_pypi_push_wheel: + extends: [publish:test_release_pypi_push_wheel, .publish_common_release] + needs: [publish:release_pypi_test_wheel, publish:release_version_bump] + dependencies: [publish:release_pypi_test_wheel] + variables: + PUBLISH_DRYRUN: "no" + +publish:release_github: + extends: [publish:test_release_github, .publish_common_release] + needs: [publish:release_pypi_test_wheel, publish:release_version_bump] + dependencies: [publish:release_pypi_test_wheel] + variables: + PUBLISH_DRYRUN: "no" + +publish:release_notify: + needs: [publish:release_pypi_test_wheel, publish:release_pypi_push_wheel, publish:release_github] + extends: [publish:test_release_notify, .publish_common_release] + dependencies: [publish:release_pypi_test_wheel] + variables: + PUBLISH_DRYRUN: "no" + +publish:docs: + extends: [.publish_common_release] + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + before_script: + - eval PUBLISH_COMMIT=$PUBLISH_COMMIT + - git fetch origin '+refs/merge-requests/*:refs/remotes/merge-requests/*' + - git fetch origin $PUBLISH_COMMIT + - git checkout $PUBLISH_COMMIT + script: + - cd .. + - rm -rf documentation && git clone --recursive https://gitlab-ci-token:${PAT}@${GITLAB_ENDPOINT}/nemo-megatron-core-tme/documentation.git + - cd documentation/megatron-lm + - git config --global user.email "mcore-bot@nvidia.com" + - git config --global user.name "Mcore Bot" + - git fetch origin '+refs/merge-requests/*:refs/remotes/merge-requests/*' + - git fetch origin $PUBLISH_COMMIT + - git checkout $PUBLISH_COMMIT + - cd .. 
+ - git add megatron-lm + - | + git commit -m 'feat: Bump mcore' + + - git push + rules: + - if: '$CI_COMMIT_BRANCH == "main" && $CI_PIPELINE_SOURCE == "push"' + allow_failure: true + - when: never + +publish:upload_statistics: + stage: publish + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + needs: + - job: test:unit_tests_pyt(DEV)_mcore(legacy) + optional: true + - job: test:unit_tests_pyt(LTS)_mcore(legacy) + optional: true + - job: test:unit_tests_pyt(DEV)_mcore(latest) + - job: test:unit_tests_pyt(LTS)_mcore(latest) + - job: functional:run_lts_dgx_a100 + optional: true + - job: functional:run_lts_dgx_h100 + optional: true + - job: functional:run_dev_dgx_a100 + optional: true + - job: functional:run_dev_dgx_h100 + optional: true + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + script: + - env + - export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE} + - export GITLAB_ENDPOINT + - export DASHBOARD_ENDPOINT + - python tests/test_utils/python_scripts/dashboard.py --pipeline-id ${CI_PIPELINE_ID} + rules: + - if: ($CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' || $CI_MERGE_REQUEST_EVENT_TYPE == 'merge_train') && ($UNIT_TEST == "yes" || $INTEGRATION_TEST == "yes" || $FUNCTIONAL_TEST == "yes") + when: always + allow_failure: true + - when: never + +public:review_reminder: + stage: publish + image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID} + script: + - export GITLAB_ENDPOINT + - export RO_API_TOKEN=${PAT} + - export SLACK_WEBHOOK_URL=${SLACK_REMINDER_HOOK} + - export SLACK_API_TOKEN=${SLACK_API_TOKEN} + - python tests/test_utils/python_scripts/auto_reminder.py + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + rules: + - if: $CI_COMMIT_BRANCH == "ci-review-reminder" && $PUBLISH == "yes" && $PUBLISH_SCOPE == "review-reminder" + - when: never + +publish:code_freeze: + extends: [.publish_common_freeze] + image: ${CI_MCORE_LTS_IMAGE}:${CI_PIPELINE_ID} + needs: [test:build_image] + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + script: + - git fetch origin main + - git config --global user.email "mcore-bot@nvidia.com" + - git config --global user.name "Mcore Bot" + - git remote set-url origin "https://gitlab-ci-token:${PAT}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git" + - sed -i "/^PRE_RELEASE/c\PRE_RELEASE = ''" megatron/core/package_info.py + - VERSION=$(python -c "from megatron import core; print(core.__version__)") + - RELEASE_BRANCH=core_r$VERSION + - git switch --force-create $RELEASE_BRANCH origin/main + - git push -u origin $RELEASE_BRANCH + - | + MESSAGE='{ + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "Releasebot 🤖: Megatron Core has been frozen 🎉 to branch `'"$RELEASE_BRANCH"'`" + } + } + ] + }' + - | + curl -X POST -H "Content-type: application/json" --data "$MESSAGE" ${MCORE_NOTIFICATION_HOOK_MAIN} + + - git switch main + - git switch --force-create bot/chore/bump-version + - git add megatron/core/package_info.py + - | + git commit -m "chore: adjust version version" + - git push -u origin bot/chore/bump-version + - | + curl \ + --header "PRIVATE-TOKEN: $PAT" \ + --url https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests \ + -d "source_branch=bot/chore/bump-version" \ + -d "target_branch=$RELEASE_BRANCH" \ + -d "title=chore: Fix version of \`$RELEASE_BRANCH\`" \ + -d "description=[🤖]: Hi @okoenig 👋,

we've adjusted the version number of \`$RELEASE_BRANCH\` for you! 🚀

Please review and approve this cherry pick by your convenience\!" + +publish:upgrade_dependencies: + stage: publish + image: ${CI_MCORE_DEV_IMAGE}:${CI_PIPELINE_ID} + script: + - export GITLAB_ENDPOINT + - export RO_API_TOKEN=${PAT} + - export BRANCH_NAME=ci-bot/build/upgrade-dependencies-$(date +%Y-%m-%d) + - uv lock --upgrade + - git checkout -b $BRANCH_NAME + - git add uv.lock pyproject.toml + - git config --global user.email "mcore-bot@nvidia.com" + - git config --global user.name "Mcore Bot" + - git remote set-url origin "https://gitlab-ci-token:${PAT}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git" + - | + git commit -m "chore: Upgrade dependencies" + - git push --force -u origin $BRANCH_NAME + - | + curl \ + --header "PRIVATE-TOKEN: $PROJECT_ACCESS_TOKEN_MCORE" \ + --url https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests \ + -d "source_branch=$BRANCH_NAME" \ + -d "target_branch=main" \ + -d "title=chore: Upgrade dependencies ($(date +%Y-%m-%d))" \ + -d "labels=test::Run functional tests" \ + -d "description=[🤖]: Hi @okoenig 👋,

we've upgraded the dependencies on \`$BRANCH_NAME\` for you! 🚀

Please review and approve this cherry pick by your convenience\!" + tags: + - arch/amd64 + - env/prod + - origin/jet-fleet + - owner/jet-core + - purpose/utility + - team/megatron + rules: + - if: $CI_COMMIT_BRANCH == "ci-upgrade-dependencies" && $PUBLISH == "yes" && $PUBLISH_SCOPE == "upgrade-dependencies" + - when: never diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..851efa0e30 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,17 @@ +repos: +- repo: https://github.com/psf/black + rev: 'refs/tags/24.4.2:refs/tags/24.4.2' + hooks: + - id: black + files: ^megatron/core/.*|^tests/unit_tests/.* + args: ["--skip-magic-trailing-comma", "--skip-string-normalization"] +- repo: https://github.com/pycqa/pylint + rev: v3.2.6 + hooks: + - id: pylint + files: ^megatron/core/.* +- repo: https://github.com/pycqa/isort + rev: 5.13.2 + hooks: + - id: isort + files: ^megatron/core/.* \ No newline at end of file diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000000..e2c1e0f36f --- /dev/null +++ b/.pylintrc @@ -0,0 +1,21 @@ +[MAIN] +ignore-paths=tests +max-line-length=100 +load-plugins=pylint.extensions.bad_builtin +[MESSAGES CONTROL] +disable=all + +enable=C0115,C0116,W0611,C0301,E0606,W0141 +# C0115: missing-class-docstring +# C0116: missing-function-docstring +# W0611: unused-import +# C0301: line-too-long +# E0606: possibly-used-before-assignment +# W0141: bad-builtin (from bad_builtin extension) + +[BASIC] +bad-functions=print + +[BAD_BUILTIN] +# Specify which builtins should be flagged +bad-builtins=print \ No newline at end of file diff --git a/.python-version b/.python-version new file mode 100644 index 0000000000..fdcfcfdfca --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.12 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..471fe836d5 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,288 @@ +# Changelog + +## NVIDIA Megatron Core 0.14.0 + +* Features + * Inference + * Add async support for DynamicInferenceEngine ([MR \!3187](https://github.com/NVIDIA/Megatron-LM/commit/05079d55a5bfcc7a43f4619e36a40a9e8db3f882)) + * Pad input tensors and enable FP8 weights for FP8 inference ([MR \!3341](https://github.com/NVIDIA/Megatron-LM/commit/6a6cd478839d90cf09a837adf8c79cbc844bc920)) + * Force inference to always gather logits with tensor parallelism ([MR \!3442](https://github.com/NVIDIA/Megatron-LM/commit/7c9cdcb794089968278c7272e0261a68edf5d369)) + * Multi batch size CUDA Graphs for Dynamic Inference ([MR \!3402](https://github.com/NVIDIA/Megatron-LM/commit/30aabe5e3133c6d70aa55aaabad4ea8cb04ce63c)) + * Post-training + * ModelOpt updates ([MR \!3268](https://github.com/NVIDIA/Megatron-LM/commit/550ed5243c3a18e39430c15e8918ee63e41d7eaf)) + * Add speculative decoding AR validation feature + * Add DeepSeek and Qwen model configs + * Performance + * ModelCommProcessGroup integration ([MR \!3391](https://github.com/NVIDIA/Megatron-LM/commit/26adc2dfde53fbc2b063e2fdd1d9ed26578811a6)) + * Add HyperCommGrid: N-Dimensional Communication Grid for Model Parallelism ([MR \!3398](https://github.com/NVIDIA/Megatron-LM/commit/45400df7da7fa23e3aff86804e5ac254d9a8d3c0)) + * Flexible creation and management of communication groups + * Add support for Spike No More embedding initializations and weight decay skipping ([MR \!3500](https://github.com/NVIDIA/Megatron-LM/commit/ee74aa66a06b24e511270f285db475941ef63bfd)) + * Model support + * Add MiMo video VLM train example (\[MR \!3543) + * Add 
AVLM for MIMO (\[MR \!3624) + * Ease of use + * Add uv support for source installs ([MR \!3615](https://github.com/NVIDIA/Megatron-LM/commit/164204cd7216e642bdef7299c569d95f02f9a79e)) + * Automated weekly prereleases ([MR \!3574](https://github.com/NVIDIA/Megatron-LM/commit/7e59266c70ef34a246438640af690b55c7ecac28)) +* Bug fixes + * Use mscale\_all\_dim for softmax\_factor ([MR \!2800](https://github.com/NVIDIA/Megatron-LM/commit/e96a358f60c82b8ac8d965d91c3cc4ad0230a4e0)) + * Fix FP8 param blockwise scaling unit test ([MR \!3480](https://github.com/NVIDIA/Megatron-LM/commit/57082f946a04c3390fcfc43634dc546ec3ded033)) + * Fix unit test blockwise scaling ([MR \!3491](https://github.com/NVIDIA/Megatron-LM/commit/6d95fe63658f967e56a3fda88a9c30a424fcb520)) + * Optimize prefill for token-less requests ([MR \!3499](https://github.com/NVIDIA/Megatron-LM/commit/daaa650a9ac4291d4027ca2fdeb4298ce024efd2)) + * Add default values for Fp8Padding and Fp8Unpadding ([MR \!3501](https://github.com/NVIDIA/Megatron-LM/commit/42b2b1d10a9cb699b7e5aa40f6bfba9c2a1348aa)) + * Fix CUDA graph logic for flexible pp layout ([MR \!3505](https://github.com/NVIDIA/Megatron-LM/commit/020d85e50ddf0f0282802002acb3662129a519c5)) + * Load FP8 models with strict=False ([MR \!3508](https://github.com/NVIDIA/Megatron-LM/commit/1ab876ddc4c1893c76f26d775226a8d1dcdfb3d2)) + * Skip rope check for torch \< 1.4.0 ([MR \!3528](https://github.com/NVIDIA/Megatron-LM/commit/d8180ef8ed0bb6f305dcdedf1b27d91304f361a3)) + * Disable Apex tests for stability ([MR \!3539](https://github.com/NVIDIA/Megatron-LM/commit/d1256277fe378add0a2cfd7251f5a350b6d126ec)) + * Fix typo in parallel\_state expert parallelism ([MR \!3548](https://github.com/NVIDIA/Megatron-LM/commit/5783ff32af759b8102cf0cb0bb82b30c48b9da26)) + * Guard modelopt on macOS ([MR \!3549](https://github.com/NVIDIA/Megatron-LM/commit/76144fe1106e4fb0e69aa75b7a6ab66e71e8f37f)) + * Retry on CUDA function failure ([MR \!3554](https://github.com/NVIDIA/Megatron-LM/commit/809aab68307a64c1386d68cc78ef70f8f4e12a80)) + * Fix NCCL mem pool creation error ([MR \!3557](https://github.com/NVIDIA/Megatron-LM/commit/b61e21153146a563309b5d44cb5d7f7425806072)) + * Fix get\_rotary\_seq\_len return type ([MR \!3559](https://github.com/NVIDIA/Megatron-LM/commit/1fa6bc83c7aeae95abc8e86ff0aac596985a01c3)) + * Retry on CUDA function failure ([MR \!3560](https://github.com/NVIDIA/Megatron-LM/commit/7da88d74865c3f1a59894173246f26e7b3bf91b9)) + * Fix NCCL allocator attribute error ([MR \!3565](https://github.com/NVIDIA/Megatron-LM/commit/6b656114795d74c3353cb007c59af49b1752f447)) + * Ensure multi-prompt inference works ([MR \!3568](https://github.com/NVIDIA/Megatron-LM/commit/0fae48931000c9c7af06f7dcf037b5b7d96e0cd6)) + * Fix MD5 on FIPS systems ([MR \!3577](https://github.com/NVIDIA/Megatron-LM/commit/83ee8c2848a3b1d42b40086a64da11e19f4b191f)) + * Fixes dynamic context and inference bugs ([MR \!3582](https://github.com/NVIDIA/Megatron-LM/commit/e9c1da60a1ccc85376666d58568ed1d3e5a4f9db)) + * Fix TE version for interleaved fused RoPE ([MR \!3586](https://github.com/NVIDIA/Megatron-LM/commit/b72b6cc161f5273b545bca09677382917cf20492)) + * Fix MTP with MoE and TP logging ([MR \!3594](https://github.com/NVIDIA/Megatron-LM/commit/9af96623b66693e058f6bfce8d0094dc976792d8)) + * Guard TE import fix ([MR \!3596](https://github.com/NVIDIA/Megatron-LM/commit/1bf946b1ec3f11e71459c7c0d06a97edbed96a1a)) + * Add assertion for NCCL UB case ([MR 
\!3599](https://github.com/NVIDIA/Megatron-LM/commit/e11d28592f19c122859be764b7afe7c208d9acc1)) + * Remove Encoder PP related Functions ([MR \!3604](https://github.com/NVIDIA/Megatron-LM/commit/9e49aa4446a58cc21c4dc0c5d0806551ad075ca7)) + * Fix segfaults in tests ([MR \!3605](https://github.com/NVIDIA/Megatron-LM/commit/f6492fe8164fd5b9ad55007d435ccfc66cb98cc7)) + * Fix TE error in distributed optimizer ([MR \!3625](https://github.com/NVIDIA/Megatron-LM/commit/e6c510ff3c1159f8955589b26f7c395bdf0607d9)) + * Remove redundant barrier in checkpoint flow ([MR \!3626](https://github.com/NVIDIA/Megatron-LM/commit/26869feb6a3ac7f5616cb7253c37a4244d107d70)) + * Support VPP MTP, fix logging ([MR \!3630](https://github.com/NVIDIA/Megatron-LM/commit/c351a473c7eedac2c43eab0815afb9759f4f8187)) + * Retry mechanism for free(): invalid pointer errors ([MR \!3632](https://github.com/NVIDIA/Megatron-LM/commit/ec35b41b2df145a7ccb84afc48d94e0786e094da)) + * Fix test\_replication.py issues ([MR \!3633](https://github.com/NVIDIA/Megatron-LM/commit/f7b50b271b2e0e396069e02551b21aa6fb374b43)) + * Fix typo in parallel\_state ([MR \!3634](https://github.com/NVIDIA/Megatron-LM/commit/3c79a2c330290df58804c33e28e7c197fcc1f0b9)) + * Fix CUDA graph logic determination ([MR \!3635](https://github.com/NVIDIA/Megatron-LM/commit/90efa3ef8a3c4f9e0f1db9f67ab9348bfa501387)) + * Fix TE installation error ([MR \!3636](https://github.com/NVIDIA/Megatron-LM/commit/7e7322c01c9cb8ec254ecd9042700b22b70fe5c8)) + * Ensure correct sharding type in local tests ([MR \!3643](https://github.com/NVIDIA/Megatron-LM/commit/946357f8dd7fdc12424b3a66bc999e6c0a02696c)) + * Fix cudagraphed backward buffer reuse for last layer ([MR \!3645](https://github.com/NVIDIA/Megatron-LM/commit/ee61cf450d24760952e8995aab045ab6d55b986e)) + * Set default for packed\_seq\_params in get\_rotary\_seq\_len ([MR \!3651](https://github.com/NVIDIA/Megatron-LM/commit/510d58c46664f44c556005ac928c5c531e12f761)) + * Fix dynamic example script errors ([MR \!3653](https://github.com/NVIDIA/Megatron-LM/commit/72e290bf1f4bbf0c8047bb10a51da6ea6372e163)) + * Guard TE import fix ([MR \!3666](https://github.com/NVIDIA/Megatron-LM/commit/ac198fc0d60a8c748597e01ca4c6887d3a7bcf3d)) +* Known issues + +## NVIDIA Megatron Core 0.13.0 + +* Support bf16 dtype for optimizer states to use precision-aware optimizer in TransformerEngine +* MoE + * Features: + * Flexible Asymmetric Virtual Pipeline Parallelism with Custom Pipeline Layout (--pipeline-model-parallel-layout) + * Add support to pass custom parallelism groups to MoE modules. + * Add Hybrid Shard Data-Parallel support for MoE models (--num-distributed-optimizer-instances) + * Support EP \+ custom FSDP training for DeepSeek-V3 + * FP8 support for Multi-Token-Prediction + * Memory Optimization + * Fine-grained recomputation to reduce activation memory. (--recompute-modules with \--recompute-granularity selective) + * Memory efficient token permutation by moving the probs multiplication from unpermutation to activation function of GroupedMLP. + * Performance Optimization + * MLA RoPE fusion kernel and YARN embedding cache. + * FP8 padding optimization of MoE models by padding the routing map. + * Bug fixes: + * Fix the aux loss calculation when expert\_bias or group limited routing is used. This leads to load\_balancing\_loss values change compared to the previous version. + * Fix packed sequence support for MLA + * Known Issues: + * MTP is not compatible with flexible pipeline layout, will be fixed at \!3594. 
+ * MTP convergence issue with TP2, will be fixed at \!3594. + +## NVIDIA Megatron Core 0.12.0 + +* Add FP8 recipe selection to arguments (--fp8-recipe, --first-last-layers-bf16, --num-layers-at-start-in-bf16, --num-layers-at-end-in-bf16) +* Context parallel: fix loss scaling when calculate_per_token_loss=True +* Make the number of data parallel communication buckets configurable (--ddp-num-buckets, --ddp-pad-buckets-for-high-nccl-busbw) +* Inference + * Support in-flight batching and chunked KV cache + * Reduce memory usage, + * by not materializing full attention mask + * by only materializing logits for the last token during decode + * by removing an obsolete tensor reference +* Hybrid Model + * Inference + * Add CUDA graph support + * Change tools/run_mamba_text_generation_server.py to use megatron.core.inference + * Fix a shape issue when materializing logits for Mamba model + * Improve initialization of Mamba layers + * Add configuration switches (--mamba-state-dim, --mamba-head-dim, --mamba-num-groups, --is-hybrid-model) + * Make num_floating_point_operations work with hybrid model + * Make hybrid_conversion.py work with mixer that uses TE linear + * Add FP8 support + * Fix Mamba dt_bias tensor parallelism + * Support multimodal tokenizer + * Improve data parallelism scaling +* MoE + * Features: + * DeepEP support, compatible with all the parallelisms and token drop / dropless + * Important precision improvement: Enable FP32/FP64 routing and unpermutation using –moe-router-dtype. FP32 is recommended for all fine-grained MoE training + * CUDA Graph support for MoE + * Multi-Token Prediction (MTP) Support + * Fused indices_to_multihot kernel for DeepEP dispatcher + * Bug fixes: + * Fix Hang Issue with MoE+Dense Hybrid models + * Update theoretical memory and tflops estimation for MoE and MLA + * Fix MoE Aux loss scaling for per token loss + * Fixes for group limited routing and expert bias. We verified these fixes through dsv3 e2e verifications + * Known issues: + * The ckpt trained with Custom FSDP for MoE may not be compatible with 3D parallel training. + +## NVIDIA Megatron Core 0.11.0 + +* Add multi datacenter training support though N/S connection +* MoE + * Features + * Support DeepSeek-V3 fine-tuning + * Aux-loss-free load balancing strategy + * Node-limited routing and Device-limited routing support. + * Tensor Parallelism support for MLA and Sequence Auxiliary Loss + * MTP (with TP and PP support) is coming soon. + * Permutation / Unpermutation fusion kernel from TransformerEngine. + * Uneven virtual pipeline parallel split support in first and last PP stage. + * Bug fixes: + * Fix the grad scale when TP != expert-TP and average_in_collective is enabled in DDP. + * Fix TEGroupedMLP distckpt compatibility issue with FP8 padding/unpadding. + * Known Issues: + * When training the Dense+MoE hybrid model, the process will hang if any PP rank does not have expert params. 
+* Add MX-FP16 support for optimizer and master weights +* CUDA Graph memory optimizations +* Enable UCC backend for PP communication +* Optimizer CPU offload support for memory savings +* Models + * Initial RADIO/CRADIO implementation + * llama3.2 support +* Hybrid Model + * Support quantization via TensorRT Model Optimizer + +## NVIDIA Megatron Core 0.10.0 + +* Adding MLA to MCore +* Enable FP8 for GroupedMLP +* MoE Parallel Folding +* Enhance MoE Architecture: Support MoE Layer Frequency Patterns and Configurable MoE FFN Hidden Size +* Multimodal: NVLM training and evaluation support in MCore +* Mamba Hybrid + * Increase performance and reduce memory footprint of Triton language/compiler distributed caching + * Add more unit testing and fix bugs + +## NVIDIA Megatron Core 0.9.0 + +* Uneven pipeline parallelism + * Enable pipeline parallelism where first and last ranks have fewer transformer layers than the intermediate ranks +* Per layer CUDAGraph support for GPT training with Transformer Engine modules +* Enable different TP sizes for the vision encoder +* Enable pipeline parallelism for T5 & Llava models +* Support multi-tile multi-image input in Llava models +* MoE + * FP8 support + * Runtime upcycling support + * Dispatcher implementation optimizations + * Shared expert support with overlapping optimizations + * Qwen Model support +* Known Issues + * When using sequence parallel, during the transformer block forward pass, dropout is not using the appropriate rng context. +* NVRx / Fault tolerance + * fault and hang detection in addition to existing straggler detection + * graceful exit and auto restart + +## NVIDIA Megatron Core 0.8.0 + +* Multimodal + * Added initial support for training vision language models using the LLaVA architecture + * Added initial support for inference with multimodal inputs + * End-to-end multimodal example from data collection to training to evaluation is provided in examples/multimodal +* MoE + * Context Parallel support. + * Distributed checkpoint support for grouped GEMM. +* Mamba + +## NVIDIA Megatron Core 0.7.0 + +* MoE + * Token drop support + * Several efficiency optimizations + * Improved model parallelism + * Memory optimizations +* Distributed checkpointing + * Enabled for Retro + * Asynchronous checkpoint saving +* Several minor bug fixes, speed improvements, and memory optimizations + +## NVIDIA Megatron Core 0.6.0 + +* MoE (Mixture of Experts) + * Performance optimization + * Communication optimization for multi GPU and Single GPU + * 23% improvement (323 TFLOPS/GPU) over MCore 0.5.0 on Mixtral with Hopper BF16 + * GroupedMLP enhancement for Hopper + * DP Overlapping. Support overlapping computation with gradient reduction and parameter gathering. + * All-to-All based Token Dispatcher + * Layer-wise logging for load balancing loss. + * Improved expert parallel support including distributed optimizer. +* Distributed optimizer +* RETRO + * Data processing +* BERT + * Distributed checkpointing +* Dist checkpointing + * PyTorch native distributed backend + * Improved saving/loading speed +* TensorRT-LLM Export + * Integration with TensorRT Model Optimizer Post-training quantization (PTQ) + * Text generation driver to perform PTQ in Megatron-LM + * Llama2 and Nemotron3-8b examples to use TensorRT-LLM unified build API to build engine after training. 
+* Several minor enhancements, bug fixes, and documentation updates + +## NVIDIA Megatron Core 0.5.0 + +### Key Features and Enhancements + +Megatron core documentation is now [live!](https://docs.nvidia.com/megatron-core/developer-guide/latest/user-guide/index.html#quick-start) + +### Model Features + +* MoE (Mixture of Experts) + * Support for Z-loss, Load balancing and Sinkhorn + * Layer and communications refactor + * Richer parallelism mappings and EP can be combined with other model parallel techniques for larger MoE variants, e.g. EP + TP + DP + SP + PP + * Token dropless architecture with Top-K routing + * Performance optimization with with GroupedGEMM when number of local experts is > 1 + * Distributed checkpointing +* Interleaved rotary embedding + +### Datasets + +* Masked WordPiece datasets for BERT and T5 +* Raw and mock datasets + +### Parallelism + +### Performance + +* Activation offloading to CPU +* Rope and Swiglu fusion +* Sliding window attention (via Transformer Engine) + +### General Improvements + +* Timers + +## NVIDIA Megatron Core 0.4.0 + +### Key Features and Enhancements + +#### Models + +* BERT +* RETRO +* T5 + +#### Parallelism + +* Mixture of Experts support for GPT +* Model parallel efficient Distributed Data Parallel (DDP) +* Context Parallel (2D Tensor Parallel) support + +#### Datasets + +* GPT Dataset +* Blended Dataset diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000000..6f59d98afb --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,74 @@ +# Core +[Core-ADLR] @mcore-reviewers/core-adlr +megatron/core/ + +[Core-NeMo] @mcore-reviewers/core-nemo +megatron/core/ + +^[Core-MLPerf] @mcore-reviewers/mlperf +megatron/core/ + +[GPT] @mcore-reviewers/gpt +megatron/core/models/gpt/ + +[Multimodal] @mcore-reviewers/multi-modal +megatron/core/models/multimodal/ + +[Hybrid-mamba] @mcore-reviewers/hybrid-mamba +megatron/core/models/mamba/ + +# Distributed Checkpointing +[Distributed Checkpointing] @mcore-reviewers/dist-checkpointing +megatron/core/dist_checkpointing/ + +# Distributed Optimizer +[Distributed Optimizer] @mcore-reviewers/dist-optimizer +megatron/core/optimizer/distrib_optimizer/ + +# Quantization and Inference (QAT) +[Quantization and Inference (QAT)] @mcore-reviewers/quantization-and-inference +megatron/core/inference/modelopt_support + +# Datasets +[Datasets] @mcore-reviewers/datasets +megatron/core/datasets/ + +# Parallelism +[Pipeline Parallelism] @mcore-reviewers/pipeline-parallelism +megatron/core/pipeline_parallel/ + +# Transformer +[Transformer] @mcore-reviewers/core-adlr @mcore-reviewers/core-nemo +megatron/core/transformer/ + +[MoE-ADLR] @mcore-reviewers/moe-adlr +megatron/core/transformer/moe/ + +[MoE-Moe] @mcore-reviewers/moe-moe +megatron/core/transformer/moe/ + +# Inference +[Inference] @mcore-reviewers/inference +megatron/core/inference/ + +# Parallel State +[ParallelState] @mcore-reviewers/core-adlr @mcore-reviewers/core-nemo +megatron/core/parallel_state.py + +[Post-Training] @mcore-reviewers/post-training +megatron/core/post_training/ +megatron/post_training + +[CI][1] @mcore-reviewers/ci +.gitlab/ +.github/ +.gitlab-ci.yml +Dockerfile.ci.lts +Dockerfile.ci.dev +tests/ +megatron/core/transformer/transformer_block.py +megatron/core/transformer/transformer_layer.py + +[RL] @mcore-reviewers/rl +megatron/rl/ +examples/rl/ \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..615227600c --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,66 @@ +# Contributing to Megatron-LM 
+ +This document outlines the processes and policies for issues and pull requests by non-NVIDIA contributors to the Megatron-LM GitHub repository. + +Everyone is welcome to contribute to the project, but development of Megatron-LM continues internally at NVIDIA. When contributing, it is important to ensure that changes are in line with the project direction. Small changes to fix bugs are welcomed and appreciated. If you are proposing large architectural changes or changes for stylistic reasons, open an issue first so we can discuss them. + +PRs will first be pulled into NVIDIA's internal Megatron-LM repo and then pushed back out to the open GitHub repo with proper credit given to the committers. + +## Issue policy + +Please do file any bugs you find, keeping the following in mind: + +- If filing a bug, i.e. you have found something that doesn't work as expected, use the BUG template. +- If you've found a regression in speed or accuracy, use the REGRESSION template. +- If you are requesting a new feature or modification of an existing feature, use the ENHANCEMENT template. +- If opening an issue to ask a question, no template is needed, but please make your question as clear and concise as possible. +- One issue per bug. Putting multiple things in the same issue makes both discussion and completion unnecessarily complicated. +- Your bug is most likely to get attention from the development team quickly if we can easily reproduce it. +- Use proper spelling, grammar, and punctuation. +- Write in an authoritative and technical tone. + +## Code submission policy + +Here are some dos & don'ts to try and stick to: + +### Do: + +- Format new code in a style that is consistent with the file being changed. Megatron-LM doesn't (yet) have a style guide or enforced formatting. +- Split your changes into separate, atomic commits, i.e., a commit per feature or fix. +- Make sure your commits are rebased on the master branch. +- Write the commit message subject line in the imperative mood ("Change the default argument for X", not "Changed the default argument for X"). +- Write your commit messages in proper English, with care and punctuation. +- Check the spelling of your code, comments and commit messages. + +### Don't: + +- Submit code that's incompatible with the project license. +- Touch anything outside the stated scope of the PR. This includes formatting changes to code not relevant to the PR. +- Iterate excessively on your design across multiple commits. +- Include commented-out code. +- Attempt large architectural changes without first opening an issue to discuss. + +## Issue and Pull Request Q&A (Updated Jul 2023) + +### I've submitted an issue and PR. When can I expect to get some feedback? + +Megatron-LM is developed and maintained by a small team of researchers. We will endeavour to read and acknowledge all new issues and PRs within a week. A few rules of thumb: +- Reproducible bugs/regressions and bug/regression fixes are likely to get the attention of maintainers the quickest. +- Issues requesting an enhancement may only receive acknowledgement that they've been read, and may be closed with a "wontfix" label if they're not in line with the project direction. If they are acknowledged and remain open, you can assume the maintainers agree they're a desirable feature. +- Support requests, i.e. requests for help running the code, have the lowest priority and will be responded to as maintainer time permits. 
+ +### If my issue or PR isn't getting attention, how long should I wait before pinging one of the project maintainers? + +One week if there is no acknowledgement of the initial request. + +### Who are the project maintainers I should ping? + +The corresponding maintainers at this time are @jaredcasper and @jon-barker. + +### Is there a policy for issues and PRs that haven't been touched in X days? Should they be closed? + +Yes, starting in July 2023 we have a bot that will mark untouched PRs as "stale" after 60 days. + +We have a long backlog of issues and PRs dating back 3.5 years. We are trying to triage these now by working backwards. Older issues we believe may still be relevant may receive a request to re-test them with the latest code. If there's no response, they may be closed. Again, if you believe they should be re-opened, just respond with a comment to that effect. + +Thank you! \ No newline at end of file diff --git a/LICENSE b/LICENSE index 281fde95a6..621920c377 100644 --- a/LICENSE +++ b/LICENSE @@ -29,13 +29,15 @@ The following applies to all files unless otherwise noted: -- This repository also contains code from Hugging Face Inc., Google Research, -Facebook (from their Fairseq and Dino projects), Microsoft(from their -Swin-Transformer project)and Philip Popien. Files from these -organizations have notices at the top of each file. Below are -licenses used in those files, as indicated. +Facebook (from their Fairseq, Dino, and ParlAI projects), Microsoft (from their +Swin-Transformer project), Philip Popien, the Mamba project (Tri Dao and +Albert Gu), and the Triton language and compiler project (Philippe Tillet and +OpenAI). Files from these organizations have notices at the top of each file. +Below are licenses used in those files, as indicated. -------------- LICENSE FOR Facebook, huggingface and Google Research code -------------- +-------------------------------------------------------------------------------------- +-- LICENSE FOR Facebook, huggingface, Google Research, LLaVA, Mamba, TinyZero and vLLM code -- Apache License @@ -240,12 +242,17 @@ licenses used in those files, as indicated. See the License for the specific language governing permissions and limitations under the License. -------------- LICENSE FOR Facebook Fairseq code -------------- +-------------------------------------------------------------------------------- +LICENSE FOR +Facebook, Inc. and its affiliates, +Meta Platforms, Inc. and its affiliates, +Microsoft Corporation, +OpenGVLab/InternVL, +Triton language and compiler, +and DeepSeek. MIT License -Copyright (c) Facebook, Inc. and its affiliates. - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights @@ -264,113 +271,3 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------- LICENSE FOR Mircrosoft Swin transformer code -------------- - -MIT License - -Copyright (c) Microsoft Corporation. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE - - ---------------- NVIDIA Source Code License for SegFormer ----------------- -1. Definitions - -“Licensor” means any person or entity that distributes its Work. - -“Software” means the original work of authorship made available under this -License. - -“Work” means the Software and any additions to or derivative works of the -Software that are made available under this License. - -The terms “reproduce,” “reproduction,” “derivative works,” and -“distribution” have the meaning as provided under U.S. copyright law; -provided, however, that for the purposes of this License, derivative works -shall not include works that remain separable from, or merely link -(or bind by name) to the interfaces of, the Work. - -Works, including the Software, are “made available” under this License by -including in or with the Work either (a) a copyright notice referencing -the applicability of this License to the Work, or (b) a copy of this License. - -2. License Grant - -2.1 Copyright Grant. Subject to the terms and conditions of this License, -each Licensor grants to you a perpetual, worldwide, non-exclusive, -royalty-free, copyright license to reproduce, prepare derivative works of, -publicly display, publicly perform, sublicense and distribute its Work -and any resulting derivative works in any form. - -3. Limitations - -3.1 Redistribution. You may reproduce or distribute the Work only if -(a) you do so under this License, (b) you include a complete copy of this -License with your distribution, and (c) you retain without modification any -copyright, patent, trademark, or attribution notices that are present -in the Work. - -3.2 Derivative Works. You may specify that additional or different terms -apply to the use, reproduction, and distribution of your derivative works -of the Work (“Your Terms”) only if (a) Your Terms provide that the use -limitation in Section 3.3 applies to your derivative works, and (b) you -identify the specific derivative works that are subject to Your Terms. -Notwithstanding Your Terms, this License (including the redistribution -requirements in Section 3.1) will continue to apply to the Work itself. - -3.3 Use Limitation. The Work and any derivative works thereof only may -be used or intended for use non-commercially. Notwithstanding the -foregoing, NVIDIA and its affiliates may use the Work and any derivative -works commercially. As used herein, “non-commercially” means for research -or evaluation purposes only. - -3.4 Patent Claims. 
If you bring or threaten to bring a patent claim against -any Licensor (including any claim, cross-claim or counterclaim in a lawsuit) -to enforce any patents that you allege are infringed by any Work, then -your rights under this License from such Licensor (including the grant -in Section 2.1) will terminate immediately. - -3.5 Trademarks. This License does not grant any rights to use any Licensor’s -or its affiliates’ names, logos, or trademarks, except as necessary to -reproduce the notices described in this License. - -3.6 Termination. If you violate any term of this License, then your rights -under this License (including the grant in Section 2.1) will terminate -immediately. - -4. Disclaimer of Warranty. - -THE WORK IS PROVIDED “AS IS” WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT. -YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE. - -5. Limitation of Liability. - -EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL -THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE -SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, -INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT -OF OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK -(INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, -LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER -COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN -ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..f52b04902f --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +include megatron/core/requirements.txt +include megatron/core/README.md +include megatron/core/package_info.py +recursive-include requirements * diff --git a/README.md b/README.md index 6bb334e8e1..85f21a4322 100644 --- a/README.md +++ b/README.md @@ -1,515 +1,484 @@ -Megatron ([1](https://arxiv.org/pdf/1909.08053.pdf), [2](https://arxiv.org/pdf/2104.04473.pdf), and [3](https://arxiv.org/pdf/2205.05198)) is a large, powerful transformer developed by the Applied Deep Learning Research team at NVIDIA. This repository is for ongoing research on training large transformer language models at scale. We developed efficient, model-parallel ([tensor](https://arxiv.org/pdf/1909.08053.pdf), [sequence](https://arxiv.org/pdf/2205.05198), and [pipeline](https://arxiv.org/pdf/2104.04473.pdf)), and multi-node pre-training of transformer based models such as [GPT](https://arxiv.org/abs/2005.14165), [BERT](https://arxiv.org/pdf/1810.04805.pdf), and [T5](https://arxiv.org/abs/1910.10683) using mixed precision. 
- -Below are some of the projects where we have directly used Megatron: -* [BERT and GPT Studies Using Megatron](https://arxiv.org/pdf/1909.08053.pdf) -* [BioMegatron: Larger Biomedical Domain Language Model](https://www.aclweb.org/anthology/2020.emnlp-main.379.pdf) -* [End-to-End Training of Neural Retrievers for Open-Domain Question Answering](https://arxiv.org/abs/2101.00408) -* [Large Scale Multi-Actor Generative Dialog Modeling](https://www.aclweb.org/anthology/2020.acl-main.8.pdf) -* [Local Knowledge Powered Conversational Agents](https://arxiv.org/abs/2010.10150) -* [MEGATRON-CNTRL: Controllable Story Generation with External Knowledge Using Large-Scale Language Models](https://www.aclweb.org/anthology/2020.emnlp-main.226.pdf) -* [RACE Reading Comprehension Dataset Leaderboard](http://www.qizhexie.com/data/RACE_leaderboard.html) -* [Training Question Answering Models From Synthetic Data](https://www.aclweb.org/anthology/2020.emnlp-main.468.pdf) -* [Few-shot Instruction Prompts for Pretrained Language Models to Detect Social Biases](https://arxiv.org/abs/2112.07868) -* [Exploring the Limits of Domain-Adaptive Training for Detoxifying Large-Scale Language Models](https://arxiv.org/abs/2202.04173) -* [Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model](https://arxiv.org/abs/2201.11990) -* [Multi-Stage Prompting for Knowledgeable Dialogue Generation](https://arxiv.org/abs/2203.08745) - -Megatron is also used in [NeMo Megatron](https://developer.nvidia.com/nvidia-nemo#nemo-megatron), a framework to help enterprises overcome the challenges of building and training sophisticated natural language processing models with billions and trillions of parameters. - -Our codebase is capable of efficiently training very large (hundreds of billions of parameters) language models with both model and data parallelism. To demonstrate how the code scales with multiple GPUs and model sizes, we consider GPT models from 1 billion all the way to 1 trillion parameters. All models use a vocabulary size of 51,200 and a sequence length of 2048. We vary hidden size, number of attention heads, and number of layers to arrive at a specifc model size. As the model size increases, we also modestly increase the batch size. We leverage [NVIDIA's Selene supercomputer](https://www.top500.org/system/179842/) to perform scaling studies and use up to 3072 [A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for the largest model. Each cluster node has 8 NVIDIA 80GB A100 GPUs. The graph below shows that we scale nearly linear up to 1 trillion parameter models running on 3072 GPUs. Note that these results are from benchmark runs and these models were not trained to convergence; however, the FLOPs are measured for end-to-end training, i.e., includes all operations including data loading, optimization, and even logging. - -![Scaling Graph](images/Achieved_petaFLOPs.png) - -The following table shows both model (MFU) and hardware (HFU) FLOPs utilization for select configurations up to 1T parameters (see [our paper](https://arxiv.org/pdf/2205.05198) for a description of how these are calculated). As the model size increases, we achieve better GPU utilization and for the one trillion parameter model, we reach a MFU and HFU of 56.3% and 57.0%, respectively. Note that these numbers are also measured on benchmark runs and in this case are measured using a data parallel size of one. 
Data parallelism introduces some overhead due to the gradient all-reduce required between the data parallel groups. However, for large transformer models, this overhead is not large and can almost entirely eliminted by overlapping the gradient all-reduce with backpropagation. - -| Model Size | Model FLOPs Utilization | Hardware FLOPs Utilization | -| :---: | :---: | :---: | -| 22B | 41.5% | 43.7% | -| 175B | 51.4% | 52.8% | -| 530B | 56.0% | 57.0% | -| 1T | 56.3% | 57.0% | - -# Contents - * [Contents](#contents) - * [Setup](#setup) - * [Downloading Checkpoints](#downloading-checkpoints) - * [Usage](#usage) - * [Training](#training) - * [Data Preprocessing](#data-preprocessing) - * [BERT Pretraining](#bert-pretraining) - * [GPT Pretraining](#gpt-pretraining) - * [T5 Pretraining](#t5-pretraining) - * [Distributed Pretraining](#distributed-pretraining) - * [Activation Checkpointing and Recomputation](#activation-checkpointing-and-recomputation) - * [Distributed Optimizer](#distributed-optimizer) - * [FlashAttention](#flashattention) - * [GPT-3 Example](#gpt-3-example) - * [Retro](#retro) - * [Evaluation and Tasks](#evaluation-and-tasks) - * [GPT Text Generation](#gpt-text-generation) - * [GPT Evaluation](#gpt-evaluation) - * [WikiText Perplexity Evaluation](#wikitext-perplexity-evaluation) - * [LAMBADA Cloze Accuracy](#lambada-cloze-accuracy) - * [BERT Task Evaluation](#bert-task-evaluation) - * [RACE Evaluation](#race-evaluation) - * [MNLI Evaluation](#mnli-evaluation) - * [Datasets](#datasets) - * [Collecting Wikipedia Training Data](#collecting-wikipedia-training-data) - * [Collecting GPT Webtext Data](#collecting-gpt-webtext-data) - -# Setup -We strongly recommend using the latest release of [NGC's PyTorch container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) with DGX nodes. If you can't use this for some reason, use the latest pytorch, cuda, nccl, and NVIDIA [APEX](https://github.com/NVIDIA/apex#quick-start) releases. Data preprocessing requires [NLTK](https://www.nltk.org/install.html), though this is not required for training, evaluation, or downstream tasks. - -You can launch an instance of the PyTorch container and mount Megatron, your dataset, and checkpoints with the following Docker commands: +
+ +Megatron-LM & Megatron Core +=========================== +

GPU-optimized library for training transformer models at scale

+ +[![Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](https://docs.nvidia.com/Megatron-Core/developer-guide/latest/index.html) +[![version](https://img.shields.io/badge/release-0.12.0-green)](./CHANGELOG.md) +[![license](https://img.shields.io/badge/license-Apache-blue)](./LICENSE) + +
+ +## ⚡ Quick Start + +```bash +# 1. Install Megatron Core with required dependencies +pip install megatron-core +pip install --no-build-isolation transformer-engine[pytorch] + +# 2. Clone repository for examples +git clone https://github.com/NVIDIA/Megatron-LM.git +cd Megatron-LM +``` + +**→ [Complete Installation Guide](#installation)** - Docker, pip variants (dev,lts,etc.), source installation, and system requirements + +# Latest News + +- 🔄 NEW! **[Megatron Bridge](https://github.com/NVIDIA-NeMo/Megatron-Bridge)** - Bidirectional converter for interoperability between Hugging Face and Megatron checkpoints, featuring production-ready recipes for popular models. +- 🗺️ **[MoE Q3-Q4 2025 Roadmap](https://github.com/NVIDIA/Megatron-LM/issues/1729)** - Comprehensive roadmap for MoE features including DeepSeek-V3, Qwen3, advanced parallelism strategies, FP8 optimizations, and Blackwell performance enhancements. +- 🚀 **[GPT-OSS Implementation](https://github.com/NVIDIA/Megatron-LM/issues/1739)** - Advanced features including YaRN RoPE scaling, attention sinks, and custom activation functions are being integrated into Megatron Core. +- **[2025/06]** **[Megatron MoE Model Zoo](https://github.com/yanring/Megatron-MoE-ModelZoo)** - Best practices and optimized configurations for training DeepSeek-V3, Mixtral, and Qwen3 MoE models with performance benchmarking and checkpoint conversion tools. +- **[2025/05]** Megatron Core v0.11.0 brings new capabilities for multi-data center LLM training ([blog](https://developer.nvidia.com/blog/turbocharge-llm-training-across-long-haul-data-center-networks-with-nvidia-nemo-framework/)). + +
+Previous News + +- **[2024/07]** Megatron Core v0.7 improves scalability and training resiliency and adds support for multimodal training ([blog](https://developer.nvidia.com/blog/train-generative-ai-models-more-efficiently-with-new-nvidia-Megatron-Core-functionalities/)). +- **[2024/06]** Megatron Core added supports for Mamba-based models. Check out our paper [An Empirical Study of Mamba-based Language Models](https://arxiv.org/pdf/2406.07887) and [code example](https://github.com/NVIDIA/Megatron-LM/tree/ssm/examples/mamba). +- **[2024/01 Announcement]** NVIDIA has released the core capabilities in **Megatron-LM** into [**Megatron Core**](https://github.com/NVIDIA/Megatron-LM/tree/main/megatron/core) in this repository. Megatron Core expands upon Megatron-LM's GPU-optimized techniques with more cutting-edge innovations on system-level optimizations, featuring composable and modular APIs. Explore the [Megatron Core intro](#Megatron Core) for more details. + +
+ +
+Table of Contents + +**Getting Started** +- [Quick Start](#-quick-start) +- [Latest News](#latest-news) +- [Megatron Overview](#megatron-overview) + - [Project Structure](#project-structure) + - [Megatron-LM: Reference Implementation](#megatron-lm-reference-implementation) + - [Megatron Core: Production Library](#megatron-core-production-library) +- [Installation](#installation) + - [Docker (Recommended)](#-docker-recommended) + - [Pip Installation](#-pip-installation) + - [Source Installation](#-source-installation) + - [System Requirements](#system-requirements) + +**Core Features** +- [Performance Benchmarking](#performance-benchmarking) + - [Weak Scaling Results](#weak-scaling-results) + - [Strong Scaling Results](#strong-scaling-results) +- [Ecosystem Libraries](#ecosystem-libraries) + +**Training** +- [Training](#training) + - [Getting Started](#getting-started) + - [Data Preparation](#data-preparation) +- [Parallelism Strategies](#parallelism-strategies) + - [Data Parallelism (DP)](#data-parallelism-dp) + - [Tensor Parallelism (TP)](#tensor-parallelism-tp) + - [Pipeline Parallelism (PP)](#pipeline-parallelism-pp) + - [Context Parallelism (CP)](#context-parallelism-cp) + - [Expert Parallelism (EP)](#expert-parallelism-ep) + - [Parallelism Selection Guide](#parallelism-selection-guide) +- [Performance Optimizations](#performance-optimizations) + +**Resources** +- [Examples](./examples/) - Training scripts and tutorials +- [Documentation](https://docs.nvidia.com/Megatron-Core/) - Official docs +- [Roadmaps](#roadmaps) - Development roadmaps and feature tracking +- [Community & Support](#-community--support) - Get help and contribute + - [Getting Help](#getting-help) + - [Contributing](#contributing) + - [Citation](#citation) + +
+ +# Megatron Overview + +## Project Structure ``` -docker pull nvcr.io/nvidia/pytorch:xx.xx-py3 -docker run --gpus all -it --rm -v /path/to/megatron:/workspace/megatron -v /path/to/dataset:/workspace/dataset -v /path/to/checkpoints:/workspace/checkpoints nvcr.io/nvidia/pytorch:xx.xx-py3 +Megatron-LM/ +├── megatron/ +│ ├── core/ # Megatron Core (kernels, parallelism, building blocks) +│ │ ├── models/ # Transformer models +│ │ ├── transformer/ # Transformer building blocks +│ │ ├── tensor_parallel/ # Tensor parallelism +│ │ ├── pipeline_parallel/ # Pipeline parallelism +│ │ ├── distributed/ # Distributed training (FSDP, DDP) +│ │ ├── optimizer/ # Optimizers +│ │ ├── datasets/ # Dataset loaders +│ │ ├── inference/ # Inference engines +│ │ └── export/ # Model export (e.g. TensorRT-LLM) +│ ├── training/ # Training scripts +│ ├── inference/ # Inference server +│ ├── legacy/ # Legacy components +│ └── post_training/ # Post-training (RLHF, etc.) +├── examples/ # Ready-to-use training examples +├── tools/ # Utility tools +├── tests/ # Comprehensive test suite +└── docs/ # Documentation ``` -## Downloading Checkpoints -We have provided pretrained [BERT-345M](https://ngc.nvidia.com/catalog/models/nvidia:megatron_bert_345m) and [GPT-345M](https://ngc.nvidia.com/catalog/models/nvidia:megatron_lm_345m) checkpoints for use to evaluate or finetuning downstream tasks. To access these checkpoints, first [sign up](https://ngc.nvidia.com/signup) for and [setup](https://ngc.nvidia.com/setup/installers/cli) the NVIDIA GPU Cloud (NGC) Registry CLI. Further documentation for downloading models can be found in the [NGC documentation](https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1). +### Megatron-LM: Reference Implementation +**Reference implementation** that includes Megatron Core plus everything needed to train models. -Alternatively, you can directly download the checkpoints using: +**Best for:** +- **Training state-of-the-art foundation models** at scale with cutting-edge performance on latest NVIDIA hardware +- **Research teams** exploring new architectures and training techniques +- **Learning distributed training** concepts and best practices +- **Quick experimentation** with proven model configurations -
-BERT-345M-uncased: wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_uncased/zip -O megatron_bert_345m_v0.1_uncased.zip
-BERT-345M-cased: wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_cased/zip -O megatron_bert_345m_v0.1_cased.zip
-GPT-345M: wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip
-
+**What you get:** +- Pre-configured training scripts for GPT, LLama, DeepSeek, Qwen, and more. +- End-to-end examples from data prep to evaluation +- Research-focused tools and utilities -The models require vocabulary files to run. The BERT WordPiece vocab file can be extracted from Google's pretrained BERT models: [uncased](https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt), [cased](https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt). The GPT [vocab file](https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json) and [merge table](https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt) can be downloaded directly. +### Megatron Core: Composable Library +**Composable library** with GPU-optimized building blocks for custom training frameworks. -# Usage +**Best for:** +- **Framework developers** building on top of modular and optimized components +- **Research teams** needing custom training loops, optimizers, or data pipelines +- **ML engineers** requiring fault-tolerant training pipelines -After installation, there are several possible workflows. The most comprehensive is: -1. Data preprocessing -2. Pretraining -3. Finetuning (Optional for zero-shot tasks) -4. Downstream task evaluation or text generation +**What you get:** +- Composable transformer building blocks (attention, MLP, etc.) +- Advanced parallelism strategies (TP, PP, DP, EP, CP) +- Pipeline schedules and distributed optimizers +- Mixed precision support (FP16, BF16, FP8) +- GPU-optimized kernels and memory management +- High-performance dataloaders and dataset utilities +- Model architectures (LLaMA, Qwen, GPT, Mixtral, Mamba, etc.) -However, steps 1 and 2 can be replaced by using one of the pretrained models mentioned above. +## Ecosystem Libraries -We've provided several scripts for pretraining both BERT and GPT in [`examples`](./examples) directory, as well as scripts for both zero-shot and fine-tuned downstream tasks including MNLI, RACE, WikiText103, and LAMBADA evaluation. There is also a script for GPT interactive text generation. +**Libraries used by Megatron Core:** -# Training -## Data Preprocessing -The training data requires preprocessing. First, place your training data in a loose json format, with one json containing a text sample per line. For example: -
-{"src": "www.nvidia.com", "text": "The quick brown fox", "type": "Eng", "id": "0", "title": "First Part"}
-{"src": "The Internet", "text": "jumps over the lazy dog", "type": "Eng", "id": "42", "title": "Second Part"}
-
+- **[Megatron Energon](https://github.com/NVIDIA/Megatron-Energon)** 📣 **NEW!** - Multi-modal data loader (text, images, video, audio) with distributed loading and dataset blending +- **[Transformer Engine](https://github.com/NVIDIA/TransformerEngine)** - Optimized kernels and FP8 mixed precision support +- **[Resiliency Extension (NVRx)](https://github.com/NVIDIA/nvidia-resiliency-ext)** - Fault tolerant training with failure detection and recovery -The name of the `text` field of the json can be changed by using the `--json-key` flag in [`preprocess_data.py`](./tools/preprocess_data.py) The other metadata are optional and are not used in training. +**Libraries using Megatron Core:** -The loose json is then processed into a binary format for training. To convert the json into mmap, cached index file, or the lazy loader format use `preprocess_data.py`. Set the `--dataset-impl` flag to `mmap`, `cached`, or `lazy`, respectively (default is `mmap`). An example script to prepare data for BERT training is: -
-python tools/preprocess_data.py \
-       --input my-corpus.json \
-       --output-prefix my-bert \
-       --vocab bert-vocab.txt \
-       --dataset-impl mmap \
-       --tokenizer-type BertWordPieceLowerCase \
-       --split-sentences
-
- -The output will be two files named, in this case, `my-bert_text_sentence.bin` and `my-bert_text_sentence.idx`. The `--data-path` specified in later BERT training is the full path and new filename, but without the file extension. - -For T5 use the same preprocessing as BERT, perhaps renaming it to: -
-       --output-prefix my-t5 \
-
- -Some minor modifications are required for GPT data preprocessing, namely, the addition of a merge table, an end-of-document token, removal of sentence splitting, and a change to the tokenizer type: -
-python tools/preprocess_data.py \
-       --input my-corpus.json \
-       --output-prefix my-gpt2 \
-       --vocab gpt2-vocab.json \
-       --dataset-impl mmap \
-       --tokenizer-type GPT2BPETokenizer \
-       --merge-file gpt2-merges.txt \
-       --append-eod
-
+- **[Megatron Bridge](https://github.com/NVIDIA-NeMo/Megatron-Bridge)** - Training library with bidirectional Hugging Face ↔ Megatron checkpoint conversion, flexible training loops, and production-ready recipes +- **[NeMo RL](https://github.com/NVIDIA-NeMo/RL)** - Scalable toolkit for efficient reinforcement learning with RLHF, DPO, and other post-training methods +- **[NeMo Framework](https://docs.nvidia.com/nemo-framework/user-guide/latest/overview.html)** - Enterprise framework with cloud-native support and end-to-end examples +- **[TensorRT Model Optimizer (ModelOpt)](https://github.com/NVIDIA/TensorRT-Model-Optimizer)** - Model optimization toolkit for quantization, pruning, and distillation + +**Compatible with:** [Hugging Face Accelerate](https://github.com/huggingface/accelerate), [Colossal-AI](https://github.com/hpcaitech/ColossalAI), [DeepSpeed](https://github.com/microsoft/DeepSpeed) + +# Installation + +## 🐳 Docker (Recommended) + +We strongly recommend using the previous releases of [PyTorch NGC Container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) rather than the latest one for optimal compatibility with Megatron Core release and testing. Our releases are always based on the previous month's NGC container, so this ensures compatibility and stability. + +This container comes with all dependencies pre-installed with compatible versions and optimized configurations for NVIDIA GPUs: + +- PyTorch (latest stable version) +- CUDA, cuDNN, NCCL (latest stable versions) +- Support for FP8 on NVIDIA Hopper, Ada, and Blackwell GPUs +- For best performance, use NVIDIA Turing GPU architecture generations and later + +```bash +# Run container with mounted directories +docker run --runtime --nvidia --gpus all -it --rm \ + -v /path/to/megatron:/workspace/megatron \ + -v /path/to/dataset:/workspace/dataset \ + -v /path/to/checkpoints:/workspace/checkpoints \ + nvcr.io/nvidia/pytorch:25.04-py3 +``` + +## Pip Installation + +Megatron Core offers support for two NGC PyTorch containers: + +- `dev`: Moving head that supports the most recent upstream dependencies +- `lts`: Long-term support of NGC PyTorch 24.01 + +Both containers can be combined with `mlm` which adds package dependencies for Megatron-LM on top of Megatron Core. + +```bash +# Install the latest release with minimal dependencies (no Transformer Engine) +pip install megatron-core[dev] +``` + +```bash +# Install packages for LTS support NGC PyTorch 24.01 +pip install megatron-core[lts] +``` + +For a version of Megatron Core with only torch, run: + +```bash +pip install megatron-core +``` + +For dependencies required by Megatron-LM, please run: + +```bash +pip install megatron-core[mlm] +``` + +## Source Installation + +For development or latest features: + +For Hybrid models, Megatron Core requires [mamba](https://github.com/state-spaces/mamba). If the pre-built wheel in PyPI does not fit your environment, you can fall back to an install script Megatron Core uses in its CI system. For this, please install `uv` first: -Here the output files are named `my-gpt2_text_document.bin` and `my-gpt2_text_document.idx`. As before, in GPT training, use the longer name without the extension as `--data-path`. 
+```bash +export UV_VERSION=0.7.2 +export PATH="$HOME/.local/bin:$PATH" +curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh +export UV_PROJECT_ENVIRONMENT=./venv +export PATH="$UV_PROJECT_ENVIRONMENT/bin:$PATH" +export UV_LINK_MODE=copy +``` + +Run the following command to build upstream dependencies from source: + +```bash +# Clone and install +git clone https://github.com/NVIDIA/Megatron-LM.git +cd Megatron-LM + +# Optional: checkout specific release +git checkout core_r0.13.0 + +bash docker/common/install.sh --environment {dev,lts} +``` + +## System Requirements + +### Hardware Requirements +- **FP8 Support**: NVIDIA Hopper, Ada, Blackwell GPUs +- **Recommended**: NVIDIA Turing architecture or later + +### Software Requirements +- **CUDA/cuDNN/NCCL**: Latest stable versions +- **PyTorch**: Latest stable version +- **Transformer Engine**: Latest stable version +- **Python**: 3.12 recommended -Further command line arguments are described in the source file [`preprocess_data.py`](./tools/preprocess_data.py). +# Performance Benchmarking -## BERT Pretraining +For our latest performance benchmarking results, please refer to [NVIDIA NeMo Framework Performance Summary](https://docs.nvidia.com/nemo-framework/user-guide/latest/performance/performance_summary.html). +Our codebase efficiently trains models from 2B to 462B parameters across thousands of GPUs, achieving up to **47% Model FLOP Utilization (MFU)** on H100 clusters. -The [`examples/pretrain_bert.sh`](./examples/pretrain_bert.sh) script runs single GPU 345M parameter BERT pretraining. Debugging is the primary use for single GPU training, as the code base and command line arguments are optimized for highly distributed training. Most of the arguments are fairly self-explanatory. By default, the learning rate decays linearly over the training iterations starting at `--lr` to a minimum set by `--min-lr` over `--lr-decay-iters` iterations. The fraction of training iterations used for warmup is set by `--lr-warmup-fraction`. While this is single GPU training, the batch size specified by `--micro-batch-size` is a single forward-backward path batch-size and the code will perform gradient accumulation steps until it reaches `global-batch-size` which is the batch size per iteration. The data is partitioned into a 949:50:1 ratio for training/validation/test sets (default is 969:30:1). This partitioning happens on the fly, but is consistent across runs with the same random seed (1234 by default, or specified manually with `--seed`). We use `train-iters` as the training iterations requested. Alternatively, one can provide `--train-samples` which is total number of samples to train on. If this option is present, then instead of providing `--lr-decay-iters`, one will need to provide `--lr-decay-samples`. +![Model table](images/model_table.png) -The logging, checkpoint-saving, and evaluation intervals are specified. Checkpointing the activations facilitates the training of larger models and/or batches. Note that the `--data-path` now includes the additional `_text_sentence` suffix added in preprocessing, but does not include the file extensions. 
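For readers who want to sanity-check utilization figures like the one above: MFU is simply the achieved model FLOP/s divided by the hardware's peak FLOP/s. The sketch below is not the accounting used for the published numbers; it relies on the common "~6 FLOPs per parameter per token" rule of thumb for transformer training, and the parameter count, throughput, and peak value are illustrative assumptions (H100 BF16 dense peak).

```bash
# Back-of-envelope MFU estimate; all numbers below are illustrative assumptions.
PARAMS=462e9         # model parameters
TOKENS_PER_SEC=1.0e6 # aggregate training throughput across the whole job
NUM_GPUS=6144
PEAK_TFLOPS=989      # assumed BF16 dense peak per GPU (H100 SXM)

awk -v p="$PARAMS" -v t="$TOKENS_PER_SEC" -v g="$NUM_GPUS" -v peak="$PEAK_TFLOPS" 'BEGIN {
    achieved = 6 * p * t / g / 1e12   # ~6*N training FLOPs per token, expressed as TFLOP/s per GPU
    printf "achieved: %.0f TFLOP/s per GPU, MFU: %.1f%%\n", achieved, 100 * achieved / peak
}'
```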
+**Benchmark Configuration:** +- **Vocabulary size**: 131,072 tokens +- **Sequence length**: 4096 tokens +- **Model scaling**: Varied hidden size, attention heads, and layers to achieve target parameter counts +- **Communication optimizations**: Fine-grained overlapping with DP (`--overlap-grad-reduce`, `--overlap-param-gather`), TP (`--tp-comm-overlap`), and PP (enabled by default) -Further command line arguments are described in the source file [`arguments.py`](./megatron/arguments.py). +**Key Results:** +- **6144 H100 GPUs**: Successfully benchmarked 462B parameter model training +- **Superlinear scaling**: MFU increases from 41% to 47-48% with model size +- **End-to-end measurement**: Throughputs include all operations (data loading, optimizer steps, communication, logging) +- **Production ready**: Full training pipeline with checkpointing and fault tolerance +- *Note: Performance results measured without training to convergence* -To run `examples/pretrain_bert.sh`, make any desired modifications including setting the environment variables for `CHECKPOINT_PATH`, `VOCAB_FILE`, and `DATA_PATH`. Make sure to set these variables to their paths in the container. Then launch the container with Megatron and necessary paths mounted (as explained in [Setup](#setup)) and run the example script. +## Weak Scaling Results +Our weak scaled results show superlinear scaling (MFU increases from 41% for the smallest model considered to 47-48% for the largest models); this is because larger GEMMs have higher arithmetic intensity and are consequently more efficient to execute. -## GPT Pretraining +![Weak scaling](images/weak_scaling.png) -The `examples/pretrain_gpt.sh` script runs single GPU 345M parameter GPT pretraining. As mentioned above, single GPU training is primarily intended for debugging purposes, as the code is optimized for distributed training. +## Strong Scaling Results +We also strong scaled the standard GPT-3 model (our version has slightly more than 175 billion parameters due to larger vocabulary size) from 96 H100 GPUs to 4608 GPUs, using the same batch size of 1152 sequences throughout. Communication becomes more exposed at larger scale, leading to a reduction in MFU from 47% to 42%. -It follows largely the same format as the previous BERT script with a few notable differences: the tokenization scheme used is BPE (which requires a merge table and a `json` vocabulary file) instead of WordPiece, the model architecture allows for longer sequences (note that the max position embedding must be greater than or equal to the maximum sequence length), and the `--lr-decay-style` has been set to cosine decay. Note that the `--data-path` now includes the additional `_text_document` suffix added in preprocessing, but does not include the file extensions. +![Strong scaling](images/strong_scaling.png) -Further command line arguments are described in the source file [`arguments.py`](./megatron/arguments.py). +# Training + +## Getting Started -`examples/pretrain_gpt.sh` can be launched the same way as described for BERT. Set the env vars and make any other modifications, launch the container with appropriate mounts, and run the script. 
+### Simple Training Example +```bash +# Distributed training example (2 GPUs, mock data) +torchrun --nproc_per_node=2 examples/run_simple_mcore_train_loop.py +``` + +### LLama-3 Training Example +```bash +# 8 GPUs, FP8 precision, mock data +./examples/llama/train_llama3_8b_fp8.sh +``` -## T5 Pretraining +## Data Preparation -Very similar to BERT and GPT, the `examples/pretrain_t5.sh` script runs single GPU "base" (~220M parameter) T5 pretraining. The primary difference from BERT and GPT is the addition of the following arguments to accommodate the T5 architecture: +### JSONL Data Format +```json +{"text": "Your training text here..."} +{"text": "Another training sample..."} +``` + +### Basic Preprocessing +```bash +python tools/preprocess_data.py \ + --input data.jsonl \ + --output-prefix processed_data \ + --tokenizer-type HuggingFaceTokenizer \ + --tokenizer-model /path/to/tokenizer.model \ + --workers 8 \ + --append-eod +``` -* `--kv-channels` sets the inner dimension of the "key" and "value" matrices of all attention mechanisms in the model. For BERT and GPT this defaults to the hidden size divided by the number of attention heads, but can be configured for T5. +### Key Arguments +- `--input`: Path to input JSON/JSONL file +- `--output-prefix`: Prefix for output binary files (.bin and .idx) +- `--tokenizer-type`: Tokenizer type (`HuggingFaceTokenizer`, `GPT2BPETokenizer`, etc.) +- `--tokenizer-model`: Path to tokenizer model file +- `--workers`: Number of parallel workers for processing +- `--append-eod`: Add end-of-document token -* `--ffn-hidden-size` sets the hidden size in the feed-forward networks within a transformer layer. For BERT and GPT this defaults to 4 times the transformer hidden size, but can be configured for T5. + -* `--encoder-seq-length` and `--decoder-seq-length` set the sequence length for the encoder and decoder separately. +# Parallelism Strategies -All of the other arguments remain as they were for BERT and GPT pretraining. Run this example with the same steps described above for the other scripts. +## Data Parallelism (DP) -## Distributed Pretraining +### Standard Data Parallel +```bash +# Standard DDP - replicate model on each GPU +torchrun --nproc_per_node=8 pretrain_gpt.py \ + --data-parallel-sharding-strategy no_shard +``` -The `examples/pretrain_{bert,gpt,t5}_distributed.sh` scripts use the PyTorch distributed launcher for distributed training. As such, multi-node training can be achieved by properly setting environment variables. See the official PyTorch [documentation](https://pytorch.org/docs/stable/elastic/run.html#launcher-api) for further description of these [environment variables](https://pytorch.org/docs/stable/distributed.html#environment-variable-initialization). By default, multi-node training uses the [nccl](https://developer.nvidia.com/nccl) distributed backend. A simple set of additional arguments and the use of the PyTorch distributed module with the `torchrun` elastic launcher (equivalent to `python -m torch.distributed.run`) are the only additional requirements to adopt distributed training. See any of `examples/pretrain_{bert,gpt,t5}_distributed.sh` for more details. +### Fully Sharded Data Parallel (FSDP) +```bash +# Megatron's optimized FSDP (~15% faster than PyTorch FSDP2) +--use-custom-fsdp -We use two types of parallelism: data and model parallelism. 
We facilitate two distributed data parallel implementations: a simple one of our own that performs gradient all-reduce at the end of back propagation step, and Torch's distributed data parallel wrapper that overlaps gradient reduction with back propagation computation. To switch between these two options use `--DDP-impl local` or `--DDP-impl torch`, respectively. As expected, Torch distributed data parallelism is more efficient at larger model sizes. For example, for the 8.3 billion parameters model running on 512 GPUs, the scaling increases from 60% to 76% when Torch's distributed data parallel is used. However, the overlapping method requires more memory and for some configurations (e.g., 2.5 billion parameters using 2-way model parallel and 1.2 billion parameters with no model parallel) can make the overall training slower as a result. We empirically found that using a smaller model in those cases improves the training time. +# PyTorch FSDP2 +--use-torch-fsdp2 -Second, we developed a simple and efficient two-dimensional model-parallel approach. To use tensor model parallelism (splitting execution of a single transformer module over multiple GPUs, see Section 3 of [our paper](https://arxiv.org/pdf/1909.08053.pdf)), add the `--tensor-model-parallel-size` flag to specify the number of GPUs among which to split the model, along with the arguments passed to the distributed launcher as mentioned above. To use sequence parallelism specify `--sequence-parallel`, which requires tensor model parallel as it split among the same GPUs (more details in Section 4.2.2 of [our paper](https://arxiv.org/pdf/2205.05198.pdf)). +# Sharding strategies +--data-parallel-sharding-strategy optim # Shard optimizer states (ZeRO-1) +--data-parallel-sharding-strategy optim_grads # Shard gradients + optimizer (ZeRO-2) +--data-parallel-sharding-strategy optim_grads_params # Shard parameters + gradients + optimizer (ZeRO-3) +``` -To use pipeline model parallelism (sharding the transformer modules into stages with an equal number of transformer modules on each stage, and then pipelining execution by breaking the batch into smaller microbatches, see Section 2.2 of [our paper](https://arxiv.org/pdf/2104.04473.pdf)), use the `--pipeline-model-parallel-size` flag to specify the number of stages to split the model into (e.g., splitting a model with 24 transformer layers across 4 stages would mean each stage gets 6 transformer layers each). +## Tensor Parallelism (TP) +Split individual model layers across GPUs: +```bash +--tensor-model-parallel-size 4 # 4-way tensor parallelism +--sequence-parallel # Enable sequence parallelism (recommended with TP) +``` - +## Pipeline Parallelism (PP) +Split model depth across GPUs: +```bash +--pipeline-model-parallel-size 8 # 8 pipeline stages +--virtual-pipeline-model-parallel-size 4 # Virtual pipeline for better load balancing +``` -We have examples of how to use these two different forms of model parallelism the example scripts ending in `distributed_with_mp.sh`: +## Context Parallelism (CP) +Split long sequences across GPUs for handling long contexts: +```bash +--context-parallel-size 2 # 2-way context parallelism +--cp-comm-type p2p # Communication: p2p, a2a, allgather, a2a+p2p +--hierarchical-context-parallel-sizes 2 4 # Hierarchical context parallelism +``` -Other than these minor changes, the distributed training is identical to the training on a single GPU. 
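One practical constraint when combining the flags above is that the product of the tensor-, pipeline-, and context-parallel degrees must divide the total number of GPUs; whatever is left over becomes the data-parallel size. The snippet below is a small illustrative sketch of that bookkeeping (the shell variable names are ours, not Megatron flags), not an official utility:

```bash
# Sketch: how TP/PP/CP degrees carve a job into data-parallel replicas (illustrative values).
GPUS_PER_NODE=8
NNODES=8
TP=4   # --tensor-model-parallel-size
PP=4   # --pipeline-model-parallel-size
CP=2   # --context-parallel-size

WORLD_SIZE=$((GPUS_PER_NODE * NNODES))
MODEL_PARALLEL=$((TP * PP * CP))
if (( WORLD_SIZE % MODEL_PARALLEL != 0 )); then
    echo "TP*PP*CP=$MODEL_PARALLEL must divide WORLD_SIZE=$WORLD_SIZE" >&2
    exit 1
fi
DP=$((WORLD_SIZE / MODEL_PARALLEL))   # data-parallel replicas
echo "world=$WORLD_SIZE -> tp=$TP pp=$PP cp=$CP dp=$DP"
```

Expert parallelism, covered next, adds a further dimension for the MoE layers on top of this layout.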
+## Expert Parallelism (EP) +For Mixture of Experts (MoE) models: +```bash +--expert-model-parallel-size 4 # 4-way expert parallelism +--num-experts 8 # 8 experts per MoE layer +--moe-grouped-gemm # Optimize expert computation +``` -The interleaved pipelining schedule (more details in Section 2.2.2 of [our paper](https://arxiv.org/pdf/2104.04473.pdf)) can be enabled using the `--num-layers-per-virtual-pipeline-stage` argument, which controls the number of transformer layers in a virtual stage (by default with the non-interleaved schedule, each GPU will execute a single virtual stage with `NUM_LAYERS / PIPELINE_MP_SIZE` transformer layers). The total number of layers in the transformer model should be divisible by this argument value. Additionally, the number of microbatches in the pipeline (computed as `GLOBAL_BATCH_SIZE / (DATA_PARALLEL_SIZE * MICRO_BATCH_SIZE)`) should be divisible by the `PIPELINE_MP_SIZE` when using this schedule (this condition is checked in an assertion in the code). The interleaved schedule is not supported for pipelines with 2 stages (`PIPELINE_MP_SIZE=2`). +## Combining Parallelism Strategies -## Activation Checkpointing and Recomputation +### Parallelism Selection Guide -To reduce GPU memory usage so deploy a large model to a training system, we support activation checkpointing and recomputation. We support two levels of recompute granularity: `selective` and `full`. Selective recomputation is the default and recommended in almost all cases. It saves the activations that take less space and are expensive to recompute and recomputes activations that take a lot of space but are relatively cheap to recompute (see [our paper](https://arxiv.org/pdf/2205.05198) for details). To enable selective activation recompute simply use `--recompute-activations`. +Based on [NVIDIA NeMo production configurations](https://github.com/NVIDIA/NeMo/tree/main/scripts/performance/recommended_model_configs): -For cases where memory is very tight, `full` checkpointing saves just the inputs to a transformer layer, or a block of transformer layers, and recomputes everything else. To turn on full activation recompute use `--recompute-granularity full`. When using full activation recomputation, there are two methods: `uniform` and `block`, chosen using the `--recompute-method` argument. +| Model | Size | GPUs | TP | PP | CP | EP | Notes | +|-------|------|------|----|----|----|----|-------| +| **LLama-3** | 8B | 8 | 1 | 1 | 2 | 1 | CP for long seqlen (8K) | +| **LLama-3** | 70B | 64 | 4 | 4 | 2 | 1 | TP+PP | +| **LLama-3.1** | 405B | 1024 | 8 | 8 | 2 | 1 | 3D parallelism for scale | +| **GPT-3** | 175B | 128-512 | 4 | 8 | 1 | 1 | Large model config | +| **Mixtral** | 8x7B | 64 | 1 | 4 | 1 | 8 | EP for MoE | +| **Mixtral** | 8x22B | 256 | 4 | 4 | 8 | 8 | Combined TP+EP for large MoE | +| **DeepSeek-V3** | 671B | 1024 | 2 | 16 | 1 | 64 | Large MoE config | -* Uniform method uniformly divides the Transformer layers into groups of layers and stores the input activations of each group in the memory. The baseline group size is 1 and, in this case, the input activation of each Transformer layer is checkpointed. When the GPU memory is insufficient, increasing the number of layers per group reduces the memory usage thus enables running a bigger model. For example, when using the number of layers per group of 4, the input activation of each group of 4 Transformer layers is checkpointed. 
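For a concrete MoE configuration, the expert-parallel flags compose with tensor and pipeline parallelism roughly as in the illustrative sketch below. The degrees are arbitrary examples, the usual model, data, optimizer, and logging arguments are omitted, and sequence parallelism is enabled because combining expert parallelism with tensor parallelism requires it.

```bash
# Illustrative parallelism flags for an MoE run; model/data/optimizer arguments omitted.
torchrun --nproc_per_node=8 --nnodes=8 pretrain_gpt.py \
    --tensor-model-parallel-size 4 \
    --sequence-parallel \
    --pipeline-model-parallel-size 4 \
    --expert-model-parallel-size 4 \
    --num-experts 8 \
    --moe-grouped-gemm
```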
+### MoE-Specific Requirements -* Block method checkpoints the input activations of a set number of individual Transformer layers per pipeline stage and do the rest of layers without any checkpointing. This method can be used to skip checkpointing some Transformer layers until the GPU memory is fully used, which is applicable only when there is unused GPU memory. Checkpointing fewer transformer layers avoids unnecessary activation recomputation in the backprop thus improves training performance. For example, when we specify 5 layers to checkpoint of 8 layers per pipeline stage, the input activations of only the first 5 Transformer layers are checkpointed and activation recomputation for the rest 3 layers is not needed in the backprop. +**Important**: When combining Expert Parallelism (EP) with Tensor Parallelism (TP), **Sequence Parallelism (SP) must be enabled**. +## Performance Optimizations -## Distributed Optimizer +| Feature | Flag | Benefit | +|---------|------|---------| +| **FlashAttention** | `--attention-backend` | Faster attention and lower memory usage | +| **FP8 Training** | `--fp8-hybrid` | Faster training | +| **Activation Checkpointing** | `--recompute-activations` | Reduced memory usage | +| **Data Parallelism Communication Overlap** | `--overlap-grad-reduce` | Faster distributed training | +| **Distributed Optimizer** | `--use-distributed-optimizer` | Reduced checkpointing time | -Usage: `--use-distributed-optimizer`. Compatible with all model and data types. +**→ [NVIDIA NeMo Framework Performance Tuning Guide](https://docs.nvidia.com/nemo-framework/user-guide/latest/performance/performance-guide.html#performance-tuning-guide)** - Comprehensive performance optimization guide covering advanced tuning techniques, communication overlaps, memory optimizations, and profiling options. -The distributed optimizer is a memory savings technique, whereby the optimizer state is evenly distributed across data parallel ranks (versus the traditional method of replicating the optimizer state across data parallel ranks). As described in [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054), our implementation distributes all optimizer state that does not overlap with the model state. For example, when using fp16 model params, the distributed optimizer maintains its own separate copy of fp32 main params & grads, which are distributed across DP ranks. When using bf16 model params, however, the distributed optimizer's fp32 main grads are the same as the model's fp32 grads, and so the grads in this case are not distributed (although the fp32 main params are still distributed, as they are separate from the bf16 model params). +### FlashAttention +[FlashAttention](https://github.com/Dao-AILab/flash-attention) is a fast and memory-efficient attention algorithm. We recommend the default usage, which uses cuDNN for attention via Transformer Engine and provides up to 50% speedups on forward and 84% on backward propagation with FP8 kernels. The `flash-attn` package is also supported via `--use-flash-attn`. -Theoretical memory savings vary depending on the combination of the model's param dtype and grad dtype. 
In our implementation, the theoretical number of bytes per parameter is (where 'd' is the data parallel size): +### Mixed Precision Training +```bash +--fp16 # Standard FP16 +--bf16 # BFloat16 (recommended for large models) +--fp8-hybrid # FP8 training (Hopper, Ada, and Blackwell GPUs) +``` -| | Non-distributed optim | Distributed optim | -|-|-|-| -| fp16 param, fp16 grads | 20 | 4 + 16/d | -| bf16 param, fp32 grads | 18 | 6 + 12/d | -| fp32 param, fp32 grads | 16 | 8 + 8/d | +### Activation Checkpointing and Recomputation +```bash +# For limited memory +--recompute-activations -## FlashAttention +# For extreme memory constraints +--recompute-granularity full \ +--recompute-method uniform +``` -Usage: `--use-flash-attn`. Support attention head dimensions at most 128. +### Data Parallelism Communication Overlap -[FlashAttention](https://github.com/HazyResearch/flash-attention) is a fast and -memory-efficient algorithm to compute exact attention. It speeds up model -training and reduces memory requirement. +```bash +--overlap-grad-reduce +--overlap-param-gather +``` -To install FlashAttention: -```sh -pip install flash-attn +### Distributed Optimizer +```bash +--use-distributed-optimizer ``` -## GPT-3 Example - -In `examples/pretrain_gpt3_175B.sh` we have provided an example of how to configure Megatron to run [GPT-3](https://arxiv.org/abs/2005.14165) with 175 billion parameters on 1024 GPUs. The script is designed for [slurm](https://slurm.schedmd.com/documentation.html) with [pyxis](https://github.com/NVIDIA/pyxis) plugin but can be easily adopted to any other scheduler. It uses 8-way and 16-way tensor and pipeline parallelism, respectively. With options `global-batch-size 1536` and `rampup-batch-size 16 16 5859375`, the training will start with global batch size 16 and linearly increase the global batch size to 1536 over 5,859,375 samples with incrmeental steps 16. The training dataset can be either a single set or a multiple datasets combined with a set of weights. - -With full global batch size of 1536 on 1024 A100 GPUs, each iteration takes around 32 seconds resulting in 138 teraFLOPs per GPU which is 44% of the theoretical peak FLOPs. - - -## Retro - -See: - -- `tools/retro/README.md` for an overview. -- `tools/retro/examples/get_preprocess_cmd.sh` for an example of common preprocessing arguments. -- `tools/retro/examples/preprocess_data.sh` for an example of how to preprocess data. -- `tools/retro/examples/pretrain_model.sh` for an example of how to pretrain a model. - -Retro is a retrieval-enhanced model that is based on GPT. As described in [Improving language models by retrieving from trillions of tokens](https://arxiv.org/abs/2112.04426), Retro retrieves from a database of document chunks by performing locality search using a sample's tokens. The retrieval database can be large -- often billions or even trillions of tokens -- and provides a more efficient storage mechanism of factual knowledge, when compared to storing factual knowledge implicitly within the network's parameters. - -Using Retro requires two steps: 1) preprocessing the retrieval database and pretraining neighbors, and 2) pretraining a model using this data. Please see `tools/retro/README.md` for a detailed overview. - - - -# Evaluation and Tasks - -We provide several command line arguments, detailed in the scripts listed below, to handle various zero-shot and fine-tuned downstream tasks. However, you can also finetune your model from a pretrained checkpoint on other corpora as desired. 
To do so, simply add the `--finetune` flag and adjust the input files and training parameters within the original training script. The iteration count will be reset to zero, and the optimizer and internal state will be reinitialized. If the fine-tuning is interrupted for any reason, be sure to remove the `--finetune` flag before continuing, otherwise the training will start again from the beginning. - -Because evaluation requires substantially less memory than training, it may be advantageous to merge a model trained in parallel for use on fewer GPUs in downstream tasks. The following script accomplishes this. This example reads in a GPT model with 4-way tensor and 4-way pipeline model parallelism and writes out a model with 2-way tensor and 2-way pipeline model parallelism. - -
-python tools/checkpoint_util.py \
-        --model-type GPT \
-        --load-dir checkpoints/gpt3_tp4_pp4 \
-        --save-dir checkpoints/gpt3_tp2_pp2 \
-        --target-tensor-parallel-size 2 \
-        --target-pipeline-parallel-size 2
-
-
- -Several downstream tasks are described for both GPT and BERT models below. They can be run in distributed and model parallel modes with the same changes used in the training scripts. - -## GPT Text Generation - -We have included a simple REST server to use for text generation in `tools/run_text_generation_server.py`. You run it much like you would start a pretraining job, specifying an appropriate pretrained checkpoint. There are also few optional parameters: `temperature`, `top-k`and `top-p`. See `--help` or the source file for more information. See [examples/run_text_generation_server_345M.sh](examples/run_text_generation_server_345M.sh) for an example of how to run the server. - -Once the server is running you can use `tools/text_generation_cli.py` to query it, it takes one argument which is the host the server is running on. - -
-tools/text_generation_cli.py localhost:5000
-
- -You can also use CURL or any other tools to query the server directly: - -
-curl 'http://localhost:5000/api' -X 'PUT' -H 'Content-Type: application/json; charset=UTF-8'  -d '{"prompts":["Hello world"], "tokens_to_generate":1}'
-
- -See [megatron/text_generation_server.py](megatron/text_generation_server.py) for more API options. - -### Detoxify GPT via Self-generation -We include an example in `examples/detxoify_lm/` to detoxify language models by leveraging the generative power of language models. - -See [examples/detxoify_lm/README.md](examples/detxoify_lm/README.md) for step-by-step tutorials on how to perform domain-adaptive training and detoxify LM using self-generated corpus. - - -## GPT Evaluation -We include example scripts for GPT evaluation on WikiText perplexity evaluation and LAMBADA Cloze accuracy. - -### WikiText Perplexity Evaluation -For even comparison with prior works, we evaluate perplexity on the word-level [WikiText-103 test dataset](https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip), and appropriately compute perplexity given the change in tokens when using our subword tokenizer. - -We use the following command to run WikiText-103 evaluation on a 345M parameter model. -
-TASK="WIKITEXT103"
-
-VALID_DATA=<wikitext path>.txt
-VOCAB_FILE=gpt2-vocab.json
-MERGE_FILE=gpt2-merges.txt
-CHECKPOINT_PATH=checkpoints/gpt2_345m
-
-COMMON_TASK_ARGS="--num-layers 24 \
-                  --hidden-size 1024 \
-                  --num-attention-heads 16 \
-                  --seq-length 1024 \
-                  --max-position-embeddings 1024 \
-                  --fp16 \
-                  --vocab-file $VOCAB_FILE"
-
-python tasks/main.py \
-       --task $TASK \
-       $COMMON_TASK_ARGS \
-       --valid-data $VALID_DATA \
-       --tokenizer-type GPT2BPETokenizer \
-       --merge-file $MERGE_FILE \
-       --load $CHECKPOINT_PATH \
-       --micro-batch-size 8 \
-       --activations-checkpoint-method uniform \
-       --log-interval 10 \
-       --no-load-optim \
-       --no-load-rng
-
- - -### LAMBADA Cloze Accuracy -To compute LAMBADA cloze accuracy (the accuracy of predicting the last token given the preceding tokens) we utilize a detokenized, processed version of the [LAMBADA dataset](https://github.com/cybertronai/bflm/blob/master/lambada_test.jsonl). - -We use the following command to run LAMBADA evaluation on a 345M parameter model. Note that the `--strict-lambada` flag should be used to require whole word matching. Make that `lambada` is part of the file path. - -
-TASK="LAMBADA"
-
-VALID_DATA=<lambada path>.json
-VOCAB_FILE=gpt2-vocab.json
-MERGE_FILE=gpt2-merges.txt
-CHECKPOINT_PATH=checkpoints/gpt2_345m
-COMMON_TASK_ARGS=<same as those in WikiText Perplexity Evaluation above>
-
-python tasks/main.py \
-       --task $TASK \
-       $COMMON_TASK_ARGS \
-       --valid-data $VALID_DATA \
-       --tokenizer-type GPT2BPETokenizer \
-       --strict-lambada \
-       --merge-file $MERGE_FILE \
-       --load $CHECKPOINT_PATH \
-       --micro-batch-size 8 \
-       --activations-checkpoint-method uniform \
-       --log-interval 10 \
-       --no-load-optim \
-       --no-load-rng
-
- -Further command line arguments are described in the source file [`main.py`](./tasks/main.py) - -## BERT Task Evaluation -### RACE Evaluation -The following script finetunes the BERT model for evaluation on the [RACE dataset](http://www.cs.cmu.edu/~glai1/data/race/). The `TRAIN_DATA` and `VALID_DATA` directory contain the RACE dataset as separate `.txt` files. Note that for RACE, the batch size is the number of RACE query's to evaluate. Since each RACE query has four samples, the effective batch size passed through the model will be four times the batch size specified on the command line. - -
-TRAIN_DATA="data/RACE/train/middle"
-VALID_DATA="data/RACE/dev/middle \
-            data/RACE/dev/high"
-VOCAB_FILE=bert-vocab.txt
-PRETRAINED_CHECKPOINT=checkpoints/bert_345m
-CHECKPOINT_PATH=checkpoints/bert_345m_race
-COMMON_TASK_ARGS="--num-layers 24 \
-                  --hidden-size 1024 \
-                  --num-attention-heads 16 \
-                  --seq-length 512 \
-                  --max-position-embeddings 512 \
-                  --fp16 \
-                  --vocab-file $VOCAB_FILE"
-
-COMMON_TASK_ARGS_EXT="--train-data $TRAIN_DATA \
-                      --valid-data $VALID_DATA \
-                      --pretrained-checkpoint $PRETRAINED_CHECKPOINT \
-                      --activations-checkpoint-method uniform \
-                      --save-interval 10000 \
-                      --save $CHECKPOINT_PATH \
-                      --log-interval 100 \
-                      --eval-interval 1000 \
-                      --eval-iters 10 \
-                      --weight-decay 1.0e-1"
-
-python tasks/main.py \
-       --task RACE \
-       $COMMON_TASK_ARGS \
-       $COMMON_TASK_ARGS_EXT \
-       --tokenizer-type BertWordPieceLowerCase \
-       --epochs 3 \
-       --micro-batch-size 4 \
-       --lr 1.0e-5 \
-       --lr-warmup-fraction 0.06
-
- -### MNLI Evaluation -The following script finetunes the BERT model for evaluation with the [MultiNLI sentence pair corpus](https://www.nyu.edu/projects/bowman/multinli/). Because the matching tasks are quite similar, the script can be quickly tweaked to work with the [Quora Question Pairs](https://www.kaggle.com/quora/question-pairs-dataset) (QQP) dataset as well. - -
-
-TRAIN_DATA="data/glue_data/MNLI/train.tsv"
-VALID_DATA="data/glue_data/MNLI/dev_matched.tsv \
-            data/glue_data/MNLI/dev_mismatched.tsv"
-PRETRAINED_CHECKPOINT=checkpoints/bert_345m
-VOCAB_FILE=bert-vocab.txt
-CHECKPOINT_PATH=checkpoints/bert_345m_mnli
-COMMON_TASK_ARGS=<same as those in RACE Evaluation above>
-COMMON_TASK_ARGS_EXT=<same as those in RACE Evaluation above>
-
-python tasks/main.py \
-       --task MNLI \
-       $COMMON_TASK_ARGS \
-       $COMMON_TASK_ARGS_EXT \
-       --tokenizer-type BertWordPieceLowerCase \
-       --epochs 5 \
-       --micro-batch-size 8 \
-       --lr 5.0e-5 \
-       --lr-warmup-fraction 0.065
-
- -# Datasets -We do not host any datasets for GPT or BERT training, however, we detail their collection so that our results may be reproduced. - -## Collecting Wikipedia Training Data -We recommend following the Wikipedia data extraction process specified by Google research: "the recommended pre-processing is to download [the latest dump](https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2), extract the text with [WikiExtractor.py](https://github.com/attardi/wikiextractor), and then apply any necessary cleanup to convert it into plain text." - -We recommend using the `--json` argument when using WikiExtractor, which will dump the Wikipedia data into loose json format (one json per line), making it more manageable on the file system and also readily consumable by our codebase. We recommend further preprocessing this json dataset by nltk punctuation standardization. For BERT training, use the `--split-sentences` flag to `preprocess_data.py` as described [above](#data-preprocessing) to include sentence breaks in the produced index. If you'd like to use Wikipedia data for GPT training you should still clean it with nltk/spacy/ftfy, but do not use the `--split-sentences` flag. - -## Collecting GPT Webtext Data -We utilize the publicly available [OpenWebText](https://github.com/eukaryote31/openwebtext) library from [jcpeterson](https://github.com/jcpeterson/openwebtext) and [eukaryote31's](https://github.com/eukaryote31/openwebtext) work to download urls. We then filtered, cleaned, and deduplicated all downloaded content according to the procedure described in our [openwebtext](./tools/openwebtext) directory. For reddit URLs corresponding to content up to October 2018 we arrived at approximately 37GB of content. +# Roadmaps + +Stay up-to-date with our development roadmaps and planned features: + +- **[MoE Q3-Q4 2025 Roadmap](https://github.com/NVIDIA/Megatron-LM/issues/1729)** - Comprehensive MoE feature development including DeepSeek-V3, Qwen3, advanced parallelism, FP8 optimizations, and Blackwell enhancements +- **[GPT-OSS Implementation Tracker](https://github.com/NVIDIA/Megatron-LM/issues/1739)** - Advanced features including YaRN RoPE scaling, attention sinks, and custom activation functions + +*More roadmap trackers will be added soon.* + +# Community & Support + +## Getting Help +- 📖 **[Documentation](https://docs.nvidia.com/Megatron-Core/)** - Official documentation +- 🐛 **[Issues](https://github.com/NVIDIA/Megatron-LM/issues)** - Bug reports and feature requests + +## Contributing +We ❤️ contributions! 
Ways to contribute: +- 🐛 **Report bugs** - Help us improve reliability +- 💡 **Suggest features** - Shape the future of Megatron Core +- 📝 **Improve docs** - Make Megatron Core more accessible +- 🔧 **Submit PRs** - Contribute code improvements + +**→ [Contributing Guide](./CONTRIBUTING.md)** + +## Citation +```bibtex +@article{megatron-lm, + title={Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism}, + author={Shoeybi, Mohammad and Patwary, Mostofa and Puri, Raul and LeGresley, Patrick and Casper, Jared and Catanzaro, Bryan}, + journal={arXiv preprint arXiv:1909.08053}, + year={2019} +} +``` diff --git a/docker/Dockerfile.ci.dev b/docker/Dockerfile.ci.dev new file mode 100644 index 0000000000..45b0cba871 --- /dev/null +++ b/docker/Dockerfile.ci.dev @@ -0,0 +1,88 @@ +# syntax=docker/dockerfile:1.3-labs + +ARG FROM_IMAGE_NAME +ARG WHEEL_DIR=/workspace/wheels + +FROM ${FROM_IMAGE_NAME} as main +ENV PIP_CONSTRAINT="" +ENV DEBIAN_FRONTEND=noninteractive +ARG UV_VERSION=0.7.2 +ARG YQ_VERSION=4.44.1 +ENV PATH="/root/.local/bin:$PATH" +ARG UV_PROJECT_ENVIRONMENT=/opt/venv +ENV UV_PROJECT_ENVIRONMENT=${UV_PROJECT_ENVIRONMENT} +ENV VIRTUAL_ENV=$UV_PROJECT_ENVIRONMENT +ENV PATH="$UV_PROJECT_ENVIRONMENT/bin:$PATH" +ENV UV_LINK_MODE=copy + +RUN bash -ex <<"EOF" + apt-get update + apt-get install -y --no-install-recommends gettext python3-venv psmisc + apt-get clean + python -m venv /opt/jet + wget https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_linux_amd64 -O /usr/local/bin/yq + chmod a+x /usr/local/bin/yq + curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh +EOF + +ARG WHEEL_DIR +COPY README.md pyproject.toml uv.lock /workspace/ +COPY megatron/core/__init__.py /workspace/megatron/core/ +COPY megatron/core/package_info.py /workspace/megatron/core/ +RUN --mount=type=cache,target=/root/.cache/uv \ + bash -ex <<"EOF" + uv venv ${UV_PROJECT_ENVIRONMENT} --system-site-packages + uv sync --extra dev --extra mlm --link-mode copy --locked \ + --no-install-package torch \ + --no-install-package torchvision \ + --no-install-package triton \ + --no-install-package nvidia-cublas-cu12 \ + --no-install-package nvidia-cuda-cupti-cu12 \ + --no-install-package nvidia-cuda-nvrtc-cu12 \ + --no-install-package nvidia-cuda-runtime-cu12 \ + --no-install-package nvidia-cudnn-cu12 \ + --no-install-package nvidia-cufft-cu12 \ + --no-install-package nvidia-cufile-cu12 \ + --no-install-package nvidia-curand-cu12 \ + --no-install-package nvidia-cusolver-cu12 \ + --no-install-package nvidia-cusparse-cu12 \ + --no-install-package nvidia-cusparselt-cu12 \ + --no-install-package nvidia-nccl-cu12 +EOF + +# Install DeepEP +RUN bash -ex <<"EOF" + cd /workspace + uv pip install nvidia-nvshmem-cu12 + pushd /opt/venv/lib/python3.12/site-packages/nvidia/nvshmem/lib/ + ln -s libnvshmem_host.so.3 libnvshmem_host.so + popd + + git clone --branch v1.2.1 https://github.com/deepseek-ai/DeepEP.git + TORCH_CUDA_ARCH_LIST="9.0" uv pip install --no-build-isolation -v DeepEP/. 
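+    # The DeepEP wheel above is compiled for Hopper only (TORCH_CUDA_ARCH_LIST="9.0");
+    # extend the arch list if the image needs to target additional GPU architectures.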
+ rm -rf DeepEP +EOF + +COPY assets/ /opt/data/ +ENV UV_PYTHON=$UV_PROJECT_ENVIRONMENT/bin/python + +##### For NVIDIANS only ##### +FROM main as jet +ARG JET_API_VERSION +ENV PATH="$PATH:/opt/jet/bin" +RUN --mount=type=secret,id=JET_INDEX_URLS bash -ex <<"EOF" + JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS) + python -m venv /opt/jet + /opt/jet/bin/pip install --no-cache-dir $JET_INDEX_URLS \ + jet-api==$JET_API_VERSION +EOF + +RUN --mount=type=secret,id=JET_INDEX_URLS \ + --mount=type=secret,id=LOGGER_INDEX_URL bash -ex <<"EOF" + JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS) + LOGGER_INDEX_URL=$(cat /run/secrets/LOGGER_INDEX_URL) + uv pip install --no-cache-dir --upgrade $LOGGER_INDEX_URL "one-logger" + uv pip install --no-cache-dir --upgrade "setuptools<80.0.0" + uv pip install --no-cache-dir --upgrade $JET_INDEX_URLS "jet-client~=2.0" +EOF +### diff --git a/docker/Dockerfile.ci.lts b/docker/Dockerfile.ci.lts new file mode 100644 index 0000000000..8889760cfc --- /dev/null +++ b/docker/Dockerfile.ci.lts @@ -0,0 +1,98 @@ +# syntax=docker/dockerfile:1.3-labs + +ARG FROM_IMAGE_NAME +ARG WHEEL_DIR=/workspace/wheels + +FROM $FROM_IMAGE_NAME as build_mamba +WORKDIR /opt +ARG WHEEL_DIR +RUN MAMBA_FORCE_BUILD=TRUE pip3 wheel -v git+https://github.com/state-spaces/mamba.git@v2.0.3 -w $WHEEL_DIR + +ARG FROM_IMAGE_NAME +FROM $FROM_IMAGE_NAME as build_causalconv1d +WORKDIR /opt +ARG WHEEL_DIR +RUN CAUSAL_CONV1D_FORCE_BUILD=TRUE pip3 wheel -v git+https://github.com/Dao-AILab/causal-conv1d.git@v1.2.2.post1 -w $WHEEL_DIR + +FROM $FROM_IMAGE_NAME as build_groupedgemm +WORKDIR /opt +ARG WHEEL_DIR +RUN pip3 wheel -v git+https://github.com/fanshiqing/grouped_gemm@v1.1.2 -w $WHEEL_DIR + + +ARG FROM_IMAGE_NAME +FROM $FROM_IMAGE_NAME as main +ENV DEBIAN_FRONTEND=noninteractive + +RUN bash -ex <<"EOF" + apt-get update + apt-get install -y --no-install-recommends gettext python3-venv psmisc + apt-get clean + python -m venv /opt/jet + wget https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -O /usr/local/bin/yq + chmod a+x /usr/local/bin/yq +EOF + +ARG UV_VERSION=0.7.2 +ENV PATH="/root/.local/bin:$PATH" +RUN curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh +ENV UV_PROJECT_ENVIRONMENT=/opt/venv +ENV PATH="$UV_PROJECT_ENVIRONMENT/bin:$PATH" +ENV VIRTUAL_ENV=$UV_PROJECT_ENVIRONMENT +ENV UV_LINK_MODE=copy + +RUN +ARG WHEEL_DIR +COPY README.md pyproject.toml uv.lock /workspace/ +COPY megatron/core/__init__.py /workspace/megatron/core/ +COPY megatron/core/package_info.py /workspace/megatron/core/ +COPY docker/common/ /workspace/docker/common/ +COPY --from=build_mamba $WHEEL_DIR/*.whl $WHEEL_DIR/ +COPY --from=build_causalconv1d $WHEEL_DIR/*.whl $WHEEL_DIR/ +COPY --from=build_groupedgemm $WHEEL_DIR/*.whl $WHEEL_DIR/ +RUN bash -ex <<"EOF" + uv venv ${UV_PROJECT_ENVIRONMENT} --system-site-packages + + uv sync --extra lts --extra mlm --link-mode copy --locked \ + --no-install-package torch \ + --no-install-package torchvision \ + --no-install-package triton \ + --no-install-package nvidia-cublas-cu12 \ + --no-install-package nvidia-cuda-cupti-cu12 \ + --no-install-package nvidia-cuda-nvrtc-cu12 \ + --no-install-package nvidia-cuda-runtime-cu12 \ + --no-install-package nvidia-cudnn-cu12 \ + --no-install-package nvidia-cufft-cu12 \ + --no-install-package nvidia-cufile-cu12 \ + --no-install-package nvidia-curand-cu12 \ + --no-install-package nvidia-cusolver-cu12 \ + --no-install-package nvidia-cusparse-cu12 \ + --no-install-package nvidia-cusparselt-cu12 \ + --no-install-package 
nvidia-nccl-cu12 + + bash docker/common/install_source_wheels.sh --input-wheel-dir $WHEEL_DIR/ --environment lts +EOF +ENV PYTHONPATH="/opt/megatron-lm:$PYTHONPATH" +COPY assets/ /opt/data/ +ENV UV_PYTHON=$UV_PROJECT_ENVIRONMENT/bin/python + +##### For NVIDIANS only ##### +FROM main as jet +ARG JET_API_VERSION +ENV PATH="$PATH:/opt/jet/bin" +RUN --mount=type=secret,id=JET_INDEX_URLS bash -ex <<"EOF" + JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS) + python -m venv /opt/jet + /opt/jet/bin/pip install --no-cache-dir $JET_INDEX_URLS \ + jet-api==$JET_API_VERSION +EOF + +RUN --mount=type=secret,id=JET_INDEX_URLS \ + --mount=type=secret,id=LOGGER_INDEX_URL bash -ex <<"EOF" + JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS) + LOGGER_INDEX_URL=$(cat /run/secrets/LOGGER_INDEX_URL) + uv pip install --no-cache-dir --upgrade $LOGGER_INDEX_URL "one-logger" + uv pip install --no-cache-dir --upgrade "setuptools<80.0.0" + uv pip install --no-cache-dir --upgrade $JET_INDEX_URLS "jet-client~=2.0" +EOF +### \ No newline at end of file diff --git a/docker/Dockerfile.ci.nemo b/docker/Dockerfile.ci.nemo new file mode 100644 index 0000000000..0452976a8c --- /dev/null +++ b/docker/Dockerfile.ci.nemo @@ -0,0 +1,20 @@ +# syntax=docker/dockerfile:1.3-labs + +ARG FROM_IMAGE_NAME +FROM ${FROM_IMAGE_NAME} as main + +RUN apt-get update && \ + apt-get install -y --no-install-recommends gettext && \ + apt-get clean && \ + wget https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -O /usr/local/bin/yq && \ + chmod a+x /usr/local/bin/yq + +##### For NVIDIANS only ##### +FROM main as jet +ARG JET_API_VERSION +RUN --mount=type=secret,id=JET_INDEX_URLS \ + JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS) && \ + pip install --no-cache-dir jet-api==$JET_API_VERSION "jet-client~=2.0" --upgrade $JET_INDEX_URLS + +ENV PATH="$PATH:/opt/jet/bin" +### diff --git a/docker/Dockerfile.linting b/docker/Dockerfile.linting new file mode 100644 index 0000000000..259c0bbedc --- /dev/null +++ b/docker/Dockerfile.linting @@ -0,0 +1,23 @@ +# syntax=docker/dockerfile:experimental + +ARG FROM_IMAGE_NAME +FROM $FROM_IMAGE_NAME as main +ENV DEBIAN_FRONTEND=noninteractive +ARG UV_VERSION=0.7.2 +ARG YQ_VERSION=4.44.1 +ENV PATH="/root/.local/bin:$PATH" +ENV UV_PROJECT_ENVIRONMENT=/opt/venv +ENV PATH="$UV_PROJECT_ENVIRONMENT/bin:$PATH" +ENV UV_LINK_MODE=copy +RUN curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh +WORKDIR /opt/megatron-lm +COPY pyproject.toml uv.lock /opt/megatron-lm/ +COPY megatron/core/package_info.py megatron/core/__init__.py /opt/megatron-lm/megatron/core/ +RUN uv sync --locked --only-group linting --only-group test --only-group ci + +##### For NVIDIANS only ##### +FROM main as jet +ARG JET_API_VERSION +RUN --mount=type=secret,id=JET_INDEX_URLS \ + JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS) && \ + uv pip install --no-cache-dir "jet-client~=2.0" --upgrade $JET_INDEX_URLS diff --git a/docker/common/build_causalconv1d.sh b/docker/common/build_causalconv1d.sh new file mode 100644 index 0000000000..c5f030d8dd --- /dev/null +++ b/docker/common/build_causalconv1d.sh @@ -0,0 +1,68 @@ +#!/bin/bash +set -xeuo pipefail # Exit immediately if a command exits with a non-zero status + +# Initialize variables +REPO_URL="https://github.com/Dao-AILab/causal-conv1d.git" +REPO_REF="v1.2.2.post1" +OUTPUT_WHEEL_DIR="$(pwd)/wheels" +SCRIPT_DIR="$(dirname $(realpath $0))" + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --repo-url) + REPO_URL="$2" + shift 2 + ;; + --repo-ref) + 
REPO_REF="$2" + shift 2 + ;; + --output-wheel-dir) + OUTPUT_WHEEL_DIR="$2" + shift 2 + ;; + *) + echo "Unknown option: $1" + echo "Usage: $0 --repo-url URL --repo-ref REF --output-wheel-dir DIR" + exit 1 + ;; + esac +done + +# Check if required arguments are provided +if [ -z "$REPO_URL" ] || [ -z "$REPO_REF" ] || [ -z "$OUTPUT_WHEEL_DIR" ]; then + echo "Error: --repo-url, --repo-ref, and --output-wheel-dir are required" + echo "Usage: $0 --repo-url URL --repo-ref REF --output-wheel-dir DIR" + exit 1 +fi + +# Create a temporary directory +TEMP_DIR=$(mktemp -d) +echo "Working in temporary directory: ${TEMP_DIR}" +python3 -m venv "${TEMP_DIR}/venv" --system-site-packages +source "${TEMP_DIR}/venv/bin/activate" + +# Ensure cleanup on script exit +trap 'rm -rf "${TEMP_DIR}"' EXIT + +# Change to temporary directory +cd "${TEMP_DIR}" + +# Initialize git repository +git init + +# Perform git fetch with depth 1 +git fetch "${REPO_URL}" "${REPO_REF}" --depth 1 + +git checkout FETCH_HEAD + +# Fetch submodules +git submodule update --init --recursive + +# Create output directory if it doesn't exist +mkdir -p "${OUTPUT_WHEEL_DIR}" + +# Build the wheel using python -m build +export CAUSAL_CONV1D_FORCE_BUILD=TRUE +pip3 wheel --no-cache-dir --no-deps -w "${OUTPUT_WHEEL_DIR}" . diff --git a/docker/common/build_groupedgemm.sh b/docker/common/build_groupedgemm.sh new file mode 100644 index 0000000000..cd48b7c1f3 --- /dev/null +++ b/docker/common/build_groupedgemm.sh @@ -0,0 +1,68 @@ +#!/bin/bash +set -xeuo pipefail # Exit immediately if a command exits with a non-zero status + +# Initialize variables +REPO_URL="https://github.com/fanshiqing/grouped_gemm" +REPO_REF="v1.1.2" +OUTPUT_WHEEL_DIR="$(pwd)/wheels" +SCRIPT_DIR="$(dirname $(realpath $0))" + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --repo-url) + REPO_URL="$2" + shift 2 + ;; + --repo-ref) + REPO_REF="$2" + shift 2 + ;; + --output-wheel-dir) + OUTPUT_WHEEL_DIR="$2" + shift 2 + ;; + *) + echo "Unknown option: $1" + echo "Usage: $0 --repo-url URL --repo-ref REF --output-wheel-dir DIR" + exit 1 + ;; + esac +done + +# Check if required arguments are provided +if [ -z "$REPO_URL" ] || [ -z "$REPO_REF" ] || [ -z "$OUTPUT_WHEEL_DIR" ]; then + echo "Error: --repo-url, --repo-ref, and --output-wheel-dir are required" + echo "Usage: $0 --repo-url URL --repo-ref REF --output-wheel-dir DIR" + exit 1 +fi + +# Create a temporary directory +TEMP_DIR=$(mktemp -d) +echo "Working in temporary directory: ${TEMP_DIR}" +python3 -m venv "${TEMP_DIR}/venv" --system-site-packages +source "${TEMP_DIR}/venv/bin/activate" + +# Ensure cleanup on script exit +trap 'rm -rf "${TEMP_DIR}"' EXIT + +# Change to temporary directory +cd "${TEMP_DIR}" + +# Initialize git repository +git init + +# Perform git fetch with depth 1 +git fetch "${REPO_URL}" "${REPO_REF}" --depth 1 + +git checkout FETCH_HEAD + +# Fetch submodules +git submodule update --init --recursive + +# Create output directory if it doesn't exist +mkdir -p "${OUTPUT_WHEEL_DIR}" + +# Build the wheel using python -m build +export MAMBA_FORCE_BUILD=TRUE +pip3 wheel --no-cache-dir --no-deps -w "${OUTPUT_WHEEL_DIR}" . 
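+
+# Example invocation (values are illustrative; they match the defaults above):
+#   bash docker/common/build_groupedgemm.sh \
+#     --repo-ref v1.1.2 \
+#     --output-wheel-dir /workspace/wheels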
diff --git a/docker/common/build_mamba.sh b/docker/common/build_mamba.sh new file mode 100644 index 0000000000..385a5bddbd --- /dev/null +++ b/docker/common/build_mamba.sh @@ -0,0 +1,67 @@ +#!/bin/bash +set -xeuo pipefail # Exit immediately if a command exits with a non-zero status + +# Initialize variables +REPO_URL="https://github.com/state-spaces/mamba.git" +REPO_REF="2e16fc3062cdcd4ebef27a9aa4442676e1c7edf4" +OUTPUT_WHEEL_DIR="$(pwd)/wheels" +SCRIPT_DIR="$(dirname $(realpath $0))" + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --repo-url) + REPO_URL="$2" + shift 2 + ;; + --repo-ref) + REPO_REF="$2" + shift 2 + ;; + --output-wheel-dir) + OUTPUT_WHEEL_DIR="$2" + shift 2 + ;; + *) + echo "Unknown option: $1" + echo "Usage: $0 --repo-url URL --repo-ref REF --output-wheel-dir DIR" + exit 1 + ;; + esac +done + +# Check if required arguments are provided +if [ -z "$REPO_URL" ] || [ -z "$REPO_REF" ] || [ -z "$OUTPUT_WHEEL_DIR" ]; then + echo "Error: --repo-url, --repo-ref, and --output-wheel-dir are required" + echo "Usage: $0 --repo-url URL --repo-ref REF --output-wheel-dir DIR" + exit 1 +fi + +# Create a temporary directory +TEMP_DIR=$(mktemp -d) +echo "Working in temporary directory: ${TEMP_DIR}" +python3 -m venv "${TEMP_DIR}/venv" --system-site-packages +source "${TEMP_DIR}/venv/bin/activate" + +# Ensure cleanup on script exit +trap 'rm -rf "${TEMP_DIR}"' EXIT + +# Change to temporary directory +cd "${TEMP_DIR}" + +# Initialize git repository +git init + +# Perform git fetch with depth 1 +git fetch "${REPO_URL}" "${REPO_REF}" --depth 1 + +git checkout FETCH_HEAD + +# Fetch submodules +git submodule update --init --recursive + +# Create output directory if it doesn't exist +mkdir -p "${OUTPUT_WHEEL_DIR}" + +# Build the wheel using python -m build +pip3 wheel --no-cache-dir --no-deps -w "${OUTPUT_WHEEL_DIR}" . 
diff --git a/docker/common/build_te.sh b/docker/common/build_te.sh new file mode 100644 index 0000000000..ae1fa78f56 --- /dev/null +++ b/docker/common/build_te.sh @@ -0,0 +1,70 @@ +#!/bin/bash +set -xeuo pipefail # Exit immediately if a command exits with a non-zero status + +# Initialize variables +REPO_URL=$(cat docker/common/manifest.json | jq -r '."vcs-dependencies"."transformer-engine".repo') +REPO_REF=$(cat docker/common/manifest.json | jq -r '."vcs-dependencies"."transformer-engine".ref') + +OUTPUT_WHEEL_DIR="$(pwd)/wheels" +SCRIPT_DIR="$(dirname $(realpath $0))" + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --repo-url) + REPO_URL="$2" + shift 2 + ;; + --repo-ref) + REPO_REF="$2" + shift 2 + ;; + --output-wheel-dir) + OUTPUT_WHEEL_DIR="$2" + shift 2 + ;; + *) + echo "Unknown option: $1" + echo "Usage: $0 --repo-url URL --repo-ref REF --output-wheel-dir DIR" + exit 1 + ;; + esac +done + +# Check if required arguments are provided +if [ -z "$REPO_URL" ] || [ -z "$REPO_REF" ] || [ -z "$OUTPUT_WHEEL_DIR" ]; then + echo "Error: --repo-url, --repo-ref, and --output-wheel-dir are required" + echo "Usage: $0 --repo-url URL --repo-ref REF --output-wheel-dir DIR" + exit 1 +fi + +# Create a temporary directory +TEMP_DIR=$(mktemp -d) +echo "Working in temporary directory: ${TEMP_DIR}" +python3 -m venv "${TEMP_DIR}/venv" --system-site-packages +source "${TEMP_DIR}/venv/bin/activate" + +# Ensure cleanup on script exit +trap 'rm -rf "${TEMP_DIR}"' EXIT + +# Change to temporary directory +cd "${TEMP_DIR}" + +# Initialize git repository +git init + +# Perform git fetch with depth 1 +git fetch "${REPO_URL}" "${REPO_REF}" --depth 1 + +git checkout FETCH_HEAD + +# Fetch submodules +git submodule update --init --recursive + +# Create output directory if it doesn't exist +mkdir -p "${OUTPUT_WHEEL_DIR}" + +# Build the wheel using python -m build +export NVTE_FRAMEWORK=pytorch # Optionally set framework +pip3 wheel --no-cache-dir --no-build-isolation -w "${OUTPUT_WHEEL_DIR}" . 
+ls -al "${OUTPUT_WHEEL_DIR}" diff --git a/docker/common/install_source_wheels.sh b/docker/common/install_source_wheels.sh new file mode 100644 index 0000000000..1308e60482 --- /dev/null +++ b/docker/common/install_source_wheels.sh @@ -0,0 +1,57 @@ +#!/bin/bash +set -xeuo pipefail # Exit immediately if a command exits with a non-zero status + +INPUT_WHEEL_DIR=$(pwd)/wheels + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --input-wheel-dir) + INPUT_WHEEL_DIR="$2" + shift 2 + ;; + --environment) + ENVIRONMENT="$2" + shift 2 + ;; + *) + echo "Unknown option: $1" + echo "Usage: $0 --input-wheel-dir DIR" + exit 1 + ;; + esac +done + +# Check if required arguments are provided +if [ -z "$INPUT_WHEEL_DIR" ] || [ -z "$ENVIRONMENT" ]; then + echo "Error: --input-wheel-dir and --environment are required" + echo "Usage: $0 --input-wheel-dir DIR --environment ENV" + exit 1 +fi + +if [ "$ENVIRONMENT" = "dev" ]; then + TE_WHEEL=$(ls $INPUT_WHEEL_DIR/transformer_engine*.whl) || true + [ -z "$TE_WHEEL" ] && TE_WHEEL=$(bash docker/common/build_te.sh --output-wheel-dir $INPUT_WHEEL_DIR | tail -n 1) +fi + +MAMBA_WHEEL=$(ls $INPUT_WHEEL_DIR/mamba*.whl) || true +[ -z "$MAMBA_WHEEL" ] && MAMBA_WHEEL=$(bash docker/common/build_mamba.sh --output-wheel-dir $INPUT_WHEEL_DIR | tail -n 1) + +CAUSALCONV1D_WHEEL=$(ls $INPUT_WHEEL_DIR/causal_conv1d*.whl) || true +[ -z "$CAUSALCONV1D_WHEEL" ] && CAUSALCONV1D_WHEEL=$(bash docker/common/build_causalconv1d.sh --output-wheel-dir $INPUT_WHEEL_DIR | tail -n 1) + +GROUPEDGEMM_WHEEL=$(ls $INPUT_WHEEL_DIR/grouped_gemm*.whl) || true +[ -z "$GROUPEDGEMM_WHEEL" ] && GROUPEDGEMM_WHEEL=$(bash docker/common/build_groupedgemm.sh --output-wheel-dir $INPUT_WHEEL_DIR | tail -n 1) + +# Override deps that are already present in the base image +# only for dev +if [ "$ENVIRONMENT" = "dev" ]; then + uv pip install --no-cache-dir --no-deps $TE_WHEEL +fi + +# Install heavy optional deps like mamba, causalconv1d, groupedgemm +uv pip install --no-cache-dir \ + $MAMBA_WHEEL \ + $CAUSALCONV1D_WHEEL \ + $GROUPEDGEMM_WHEEL \ + "setuptools<80.0.0" diff --git a/docker/common/manifest.json b/docker/common/manifest.json new file mode 100644 index 0000000000..65de4212e6 --- /dev/null +++ b/docker/common/manifest.json @@ -0,0 +1,10 @@ +{ + "ngc-pytorch": "nvcr.io/nvidia/pytorch:25.03-py3", + "vcs-dependencies": { + "transformer-engine": { + "repo": "https://github.com/NVIDIA/TransformerEngine", + "ref": "bee4649c15a79ffcb9689ca7c0c963f5febaa28a" + } + }, + "pypi-dependencies": {} +} \ No newline at end of file diff --git a/docs/distrib_optimizer.md b/docs/distrib_optimizer.md deleted file mode 100644 index def23b20eb..0000000000 --- a/docs/distrib_optimizer.md +++ /dev/null @@ -1,54 +0,0 @@ -# Distributed Optimizer - -The motivation for the distributed optimizer is to save memory by distributing the optimizer state evenly across data parallel ranks, versus the current method of replicating the optimizer state across data parallel ranks. As described in https://arxiv.org/abs/1910.02054, this branch specifically implements the following: - -- [yes] distribute all 'non-overlapping' optimizer state (i.e., model params already in fp32 are NOT distributed) -- [no] distribute model gradients -- [no] distribute model parameters - -Theoretical memory savings vary depending on the combination of the model's param dtype and grad dtype. 
In the current implementation, the theoretical number of bytes per parameter is (where 'd' is the data parallel size): - -| | Non-distributed optim | Distributed optim | -| ------ | ------ | ------ | -| float16 param, float16 grads | 20 | 4 + 16/d | -| float16 param, fp32 grads | 18 | 6 + 12/d | -| fp32 param, fp32 grads | 16 | 8 + 8/d | - -The implementation of the distributed optimizer is centered on using the contiguous grad buffer for communicating grads & params between the model state and the optimizer state. The grad buffer at any given moment either holds: - -1. all model grads -2. a 1/d size _copy_ of the main grads (before copying to the optimizer state) -3. a 1/d size _copy_ of the main params (after copying from the optimizer state) -4. all model params -5. zeros (or None), between iterations - -The grad buffer is used for performing reduce-scatter and all-gather operations, for passing grads & params between the model state and optimizer state. With this implementation, no dynamic buffers are allocated. - -The figures below illustrate the grad buffer's sharding scheme, and the key steps of the distributed optimizer's param update: - -## Data flow - -![Data flow](images/distrib_optimizer/data_flow.png) - -## Sharding scheme - -![Sharding scheme](images/distrib_optimizer/sharding_scheme.png) - -## Key steps - -_(note: using illustrations above, and assuming fp16 grads)_ - -- Backward pass finishes (grad buffer holds 16 fp16 grad elements) -- Call reduce-scatter on each DP rank -- Each DP rank now has 4 elements within the grad buffer that are fully reduced (remaining 12 elements are garbage) -- Each DP rank copies its relevant 4 fp16 grad elements from the grad buffer into 4 fp32 main grad elements (separate buffer, owned by the optimizer); i.e. - - DP rank 0 copies elements [0:4] - - DP rank 1 copies elements [4:8] - - DP rank 2 copies elements [8:12] - - DP rank 3 copies elements [12:16] -- Optimizer.step() -- Each DP rank copies its 4 fp32 main (/optimizer) param elements into the corresponding 4 fp16 elements in the grad buffer -- Call all-gather on each DP rank -- Grad buffer now contains all 16, fully updated, fp16 model param elements -- Copy updated model params from grad buffer into their respective param tensors -- (At this point, grad buffer is ready to be zero'd for the next iteration) diff --git a/docs/images/distrib_optimizer/data_flow.png b/docs/images/distrib_optimizer/data_flow.png deleted file mode 100644 index d48fc134c4..0000000000 Binary files a/docs/images/distrib_optimizer/data_flow.png and /dev/null differ diff --git a/docs/images/distrib_optimizer/sharding_scheme.png b/docs/images/distrib_optimizer/sharding_scheme.png deleted file mode 100644 index b07c25b05f..0000000000 Binary files a/docs/images/distrib_optimizer/sharding_scheme.png and /dev/null differ diff --git a/docs/llama_mistral.md b/docs/llama_mistral.md new file mode 100644 index 0000000000..5dd61866e8 --- /dev/null +++ b/docs/llama_mistral.md @@ -0,0 +1,444 @@ +# Llama, Mistral and other Llama-like model support in Megatron-LM + +NOTE: In order to simplify code we now only support converting llama-3.x and mistral checkpoints downloaded from Huggingface. + +The [Llama-2](https://ai.meta.com/llama/) and [Llama-3.x](https://llama.meta.com/) family of models are an open-source set of pretrained & finetuned (for chat) models that have achieved strong results across a wide set of benchmarks. 
At their times of release, both Llama-2 and Llama-3 models achieved among the best results for open-source models, and were competitive with leading closed-source models (see https://arxiv.org/pdf/2307.09288.pdf and https://ai.meta.com/blog/meta-llama-3/). + +Similarly, [Mistral-7b](https://mistral.ai/news/announcing-mistral-7b/) is an open-source model with pretrained and finetuned (for chat) variants that achieve strong benchmark results. + +Architecturally Llama-2, Llama-3 and Mistral-7b are very similar. As such Megatron can support loading checkpoints from all three for inference and finetuning. Converting the checkpoints and loading them is slightly different for each model and is detailed for each below. + +# Contents + +- [Llama, Mistral and other Llama-like model support in Megatron-LM](#llama-mistral-and-other-llama-like-model-support-in-megatron-lm) +- [Contents](#contents) +- [Llama-2](#llama-2) + - [Download Meta or Huggingface checkpoints](#download-meta-or-huggingface-checkpoints) + - [Convert checkpoint format](#convert-checkpoint-format) + - [Meta format](#meta-format) + - [Huggingface format](#huggingface-format) + - [Launch model](#launch-model) + - [Launch Megatron](#launch-megatron) + - [Launch Meta](#launch-meta) + - [Launch Huggingface](#launch-huggingface) + - [Benchmark results](#benchmark-results) + - [Big Bench](#big-bench) + - [Multilingual](#multilingual) + - [LM Evaluation Harness](#lm-evaluation-harness) + - [MMLU](#mmlu) +- [Llama-3.x](#llama-3x) + - [Download Huggingface checkpoints](#download-huggingface-checkpoints) + - [Convert checkpoint format](#convert-checkpoint-format-1) + - [Huggingface format](#huggingface-format-1) + - [(Optional) Validate checkpoints](#optional-validate-checkpoints) + - [Launch model](#launch-model-1) +- [Mistral-7b](#mistral-7b) + - [Download Huggingface checkpoints](#download-huggingface-checkpoints-2) + - [Convert checkpoint format](#convert-checkpoint-format-3) + - [(Optional) Validate checkpoints](#optional-validate-checkpoints-2) + - [Launch model](#launch-model-3) +- [Other Llama-like model support](#other-llama-like-model-support) +- [Known numerical differences](#known-numerical-differences) +- [Using legacy model format](#using-legacy-model-format) + + +# Llama-2 + +Llama-2 checkpoints can be loaded into Megatron for inference and for finetuning. Loading these checkpoints consists of three steps: + +1. Get access to download the checkpoints. +2. Convert the checkpoints from Meta/Huggingface format to Megatron format. +3. Setup arguments for launching the model. + +The following sections detail these steps. The final section lists benchmark result comparisons between: 1) Llama-2 inference code running the Meta-format checkpoints, and 2) Megatron inference code running the converted checkpoints. + +## Download Meta or Huggingface checkpoints + +Users must first apply for access to download the Llama-2 checkpoints either directly from [Meta](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) or through [Huggingface](https://huggingface.co/docs/transformers/main/model_doc/llama2) (HF). The checkpoints are available in two formats, Meta's native format (available from both the Meta and HF links), and HF's format (available only from HF). Either format can be converted to Megatron, as detailed next. + +## Convert checkpoint format + +We recommend passing `--dtype bf16` for training or finetuning. Inference can be done in bfloat16 or float16. 
+ +### Meta format + +The Meta format checkpoints are converted to HF format as an intermediate step before converting to Megatron format. The `transformers` package is required, and must have version >=4.31.0 (e.g., `pip install transformers>=4.31.0`). (**Note**: we have specifically tested with versions `4.31.0` and `4.32.0`; your experience may vary with newer versions.) Assuming the downloaded checkpoints are in `$CHECKPOINT_DIR` (with separate sub-directories for 7B, 13B, 70B, etc.), the following example command can be used to convert from Llama-2 format to HF format in bfloat16: + +``` +python tools/checkpoint/convert.py \ +> --model-type GPT \ +> --loader llama_mistral \ +> --load-dir ${META_FORMAT_DIR} \ +> --model-size ${MODEL_SIZE} \ +> --checkpoint-type meta \ +> --tokenizer-model ${TOKENIZER_MODEL} \ +> --saver core \ +> --save-dir ${MEGATRON_FORMAT_DIR} \ +> --target-tensor-parallel-size ${TP} \ +> --target-pipeline-parallel-size ${PP} \ +> --bf16 +``` + +Valid values for `--model-size` are `llama2-7B`, `llama2-13B`, and `llama2-70B` (for pretrained-only models), and `llama2-7Bf`, `llama2-13Bf`, and `llama2-70Bf` (for chat-finetuned models). + +### Huggingface format + +The HF checkpoints can be converted to Megatron format by using Megatron's own Llama-2 checkpoint converter for HF format (see script `tools/checkpoint/loader_llama_mistral.py`). One important argument that must be set correctly is the tensor parallel size (`TP`) for each model. The following table shows these values: + +| Model size | Tensor parallel size (`TP`) | +| ---------- | --------------------------- | +| 7B | 1 | +| 13B | 2 | +| 70B | 8 | + +Using these values for `TP`, along with the path to the Llama-2 tokenizer model (automatically downloaded with original checkpoint download; see `${TOKENIZER_MODEL}` below), run the following command from the root of your Megatron source code to convert from HF format to Megatron format: + +``` +python tools/checkpoint/convert.py \ +> --model-type GPT \ +> --loader llama_mistral \ +> --load-dir ${HF_FORMAT_DIR} \ +> --model-size ${MODEL_SIZE} \ +> --checkpoint-type hf \ +> --tokenizer-model ${TOKENIZER_MODEL} \ +> --saver core \ +> --save-dir ${MEGATRON_FORMAT_DIR} \ +> --target-tensor-parallel-size ${TP} \ +> --target-pipeline-parallel-size ${PP} \ +> --bf16 +``` + +After this conversion, we are ready to load the checkpoints into a Megatron GPT model. + +## Launch model + +### Launch Megatron + +If loading for either inference or finetuning, use the following arguments: + +``` +--tensor-model-parallel-size ${TP} \ +--pipeline-model-parallel-size 1 \ +--seq-length 4096 \ +--max-position-embeddings 4096 \ +--tokenizer-type Llama2Tokenizer \ +--tokenizer-model ${TOKENIZER_MODEL} \ +--load ${CHECKPOINT_DIR} \ +--exit-on-missing-checkpoint \ +--use-checkpoint-args \ +--no-load-optim \ +--no-load-rng \ +--untie-embeddings-and-output-weights \ +--use-rotary-position-embeddings \ +--normalization RMSNorm \ +--no-position-embedding \ +--no-masked-softmax-fusion \ +--attention-softmax-in-fp32 +``` + +**Note:** If you converted to the legacy model format (i.e., `--saver legacy`), please see [here](#using-legacy-model-format). 
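+
+As an illustration of how this argument block is typically consumed, it can be appended to a standard Megatron launch. The wrapper below is only a sketch: the launcher settings, batch sizes, learning rate, iteration count, and `${DATA_PATH}` are illustrative placeholders rather than recommended finetuning values:
+
+```
+LLAMA2_LAUNCH_ARGS="--tensor-model-parallel-size ${TP} --load ${CHECKPOINT_DIR} ..."  # the full argument block listed above
+
+torchrun --nproc_per_node ${TP} pretrain_gpt.py \
+    ${LLAMA2_LAUNCH_ARGS} \
+    --finetune \
+    --data-path ${DATA_PATH} \
+    --micro-batch-size 1 \
+    --global-batch-size 32 \
+    --lr 1.0e-5 \
+    --train-iters 1000 \
+    --bf16
+```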
+ +### Launch Meta + +Meta checkpoints can be launched with: https://github.com/facebookresearch/llama + +### Launch Huggingface + +Huggingface checkpoints can be launched with: https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py + +## Benchmark results + +The tables below list the benchmark comparisons between native Llama-2 (using Meta's checkpoint and Meta's inference code) and Megatron (using a converted HF checkpoint and Megatron's inference code). + +The values are the percent error between Megatron and Llama-2, calculated using the formula: `| - | / `, where the type of score is detailed before each table. Across all tests (80 total per model size), the mean error is 0.15%. The small difference in benchmark scores between the two models is due to minor arithmetic differences in implementation that alter the numerics slightly. Some of the factors that influence this difference include: + +- Megatron performs batch matrix multiplications in a couple places, such as within self attention and in SwiGLU, that Llama performs separately. +- Megatron uses `torch.baddbmm` within self attention, versus Llama using `torch.matmul`. +- Megatron uses a `sin`/`cos` implementation for rotary position embeddings, versus Llama using a `polar`/`complex` implementation. +- Llama calls `torch.set_default_dtype(torch.float16)` during initialization, which Megatron does not. + +### Big Bench + +Score type: multiple choice grade. + +| bigbench / standard | 7b | 13b | 70b | +| -- | -- | -- | -- | +| date_understanding | 0.29% | 0.13% | 0.12% | +| general_knowledge | 0.00% | 0.00% | 0.00% | +| human_organs_senses | 0.00% | 0.00% | 0.00% | +| intent_recognition | 0.00% | 0.11% | 0.00% | +| riddle_sense | 0.00% | 0.00% | 0.00% | +| similarities_abstraction | 0.00% | 0.58% | 0.00% | +| simple_arithmetic_json_multiple_choice | 0.00% | 0.00% | 0.00% | +| undo_permutation | 0.19% | 0.19% | 0.18% | + +### Multilingual + +Score type: multiple choice grade. + +| multilingual / xcopa | 7b | 13b | 70b | +| -- | -- | -- | -- | +| en-template-mGPT-remove-punctuation | 0.08% | 0.00% | 0.00% | +| et-template-mGPT-remove-punctuation | 0.00% | 0.13% | 0.25% | +| ht-template-mGPT-remove-punctuation | 0.26% | 0.13% | 0.26% | +| id-template-mGPT-remove-punctuation | 0.11% | 0.00% | 0.19% | +| it-template-mGPT-remove-punctuation | 0.00% | 0.10% | 0.09% | +| qu-template-mGPT-remove-punctuation | 0.00% | 0.00% | 0.27% | +| sw-template-mGPT-remove-punctuation | 0.14% | 0.13% | 0.13% | +| th-template-mGPT-remove-punctuation | 0.25% | 0.13% | 0.13% | +| tr-template-mGPT-remove-punctuation | 0.26% | 0.00% | 0.34% | +| vi-template-mGPT-remove-punctuation | 0.00% | 0.11% | 0.00% | +| zh-template-mGPT-remove-punctuation | 0.00% | 0.10% | 0.09% | + +### LM Evaluation Harness + +Score type: multiple choice grade. + +| lm-eval | 7b | 13b | 70b | +| -- | -- | -- | -- | +| boolq | 0.04% | 0.04% | 0.07% | +| hellaswag | 0.02% | 0.03% | 0.03% | +| piqa | 0.00% | 0.00% | 0.07% | +| winogrande | 0.00% | 0.11% | 0.20% | + +### MMLU + +Score type: multiple choice grade. + +Note: the number in brackets is the number of sub-tasks for each supercategory. + +| mmlu | 7b | 13b | 70b | +| -- | -- | -- | -- | +| stem [18] | 0.79% | 0.05% | 0.01% | +| humanities [13] | 0.19% | 0.01% | 0.02% | +| other (business, health, misc.) [14] | 0.08% | 0.06% | 0.12% | +| social sciences [12] | 0.37% | 0.21% | 0.01% | + +# Llama-3.x + +Llama-3.x checkpoints can be loaded into Megatron for inference and for finetuning. 
Loading these checkpoints consists of several steps: + +1. Get access to download the checkpoints (weights and tokenizer). +2. Convert the checkpoints from Huggingface format to Megatron format. +3. (Optional) Validate converted checkpoints +4. Setup arguments for launching the model. + +The following sections detail these steps. + +## Download Huggingface checkpoints + +Users must first apply for access to download the Llama-3.x checkpoints from [Huggingface](https://huggingface.co/meta-llama). + +## Convert checkpoint format + +We recommend passing `--dtype bf16` for training or finetuning. Inference can be done in bfloat16 or float16. + +### Huggingface format + +The HF checkpoints can be converted to Megatron format by using Megatron's own Llama-3.x checkpoint converter for HF format (see script `tools/checkpoint/loader_llama_mistral.py`). One important argument that must be set correctly is the tensor parallel size (`TP`) for each model. The following table shows these values: + +| Model size | Tensor parallel size (`TP`) | +| ---------- | --------------------------- | +| 1B | 1 | +| 3B | 1 | +| 8B | 1 | +| 70B | 8 | + +Using these values for `TP`, along with the path to the Llama-3.x tokenizer model (automatically downloaded with original checkpoint download; see `${TOKENIZER_MODEL}` below), run the following command from the root of your Megatron source code to convert from HF format to Megatron format: + +``` +$>: python tools/checkpoint/convert.py \ + > --bf16 \ + > --model-type GPT \ + > --loader llama_mistral \ + > --saver core \ + > --target-tensor-parallel-size ${TP} \ + > --checkpoint-type hf \ + > --load-dir ${HF_FORMAT_DIR} \ + > --save-dir ${MEGATRON_FORMAT_DIR} \ + > --tokenizer-model ${TOKENIZER_MODEL} \ + > --model-size llama3 \ +``` + +After this conversion, we are ready to load the checkpoints into a Megatron GPT model. + +## (Optional) Validate checkpoints + +A Megatron-LM text generation server for Llama3 can be launched using the script `examples/inference/llama_mistral/run_text_generation_llama3.sh `. For Llama3.1, please use `examples/inference/llama_mistral/run_text_generation_llama3.1.sh`. + +Once running, query the server with `curl 'http://:5000/api' -X 'PUT' -H 'Content-Type: application/json; charset=UTF-8' -d '{"prompts":[""], "tokens_to_generate":100, "top_k":1}'`. + +A reference generation for comparison can be obtained from the Huggingface transformers library by running `python examples/llama_mistral/huggingface_reference.py --model_path --prompt `. 
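+
+For example, with the server running locally on the default port, a fully spelled-out query might look as follows; the host, prompt, and generation length are placeholders for your own deployment:
+
+```
+curl 'http://localhost:5000/api' \
+  -X 'PUT' \
+  -H 'Content-Type: application/json; charset=UTF-8' \
+  -d '{"prompts": ["The capital of France is"], "tokens_to_generate": 100, "top_k": 1}'
+```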
+ +## Launch model + +If loading for either inference or finetuning, use the following arguments for Llama 3.0: + +``` +--tensor-model-parallel-size ${TP} \ +--pipeline-model-parallel-size 1 \ +--seq-length 8192 \ +--max-position-embeddings 8192 \ +--tokenizer-type HuggingFaceTokenizer \ +--tokenizer-model ${TOKENIZER_MODEL} \ +--load ${CHECKPOINT_DIR} \ +--exit-on-missing-checkpoint \ +--use-checkpoint-args \ +--no-load-optim \ +--no-load-rng \ +--untie-embeddings-and-output-weights \ +--normalization RMSNorm \ +--position-embedding-type rope \ +--no-masked-softmax-fusion \ +--attention-softmax-in-fp32 \ +--disable-bias-linear \ +--transformer-impl transformer_engine \ +--group-query-attention 8 \ +--attention-dropout 0.0 \ +--hidden-dropout 0.0 \ +--rotary-base 500000 \ +--rotary-percent 1.0 \ +--ffn-hidden-size 14336 \ +--num-attention-heads 32 \ +--swiglu \ +--bf16 \ +``` + +For Llama3.1 please use the following arguments: + +``` +--tensor-model-parallel-size ${TP} \ +--pipeline-model-parallel-size 1 \ +--seq-length 8192 \ +--max-position-embeddings 131072 \ +--tokenizer-type HuggingFaceTokenizer \ +--tokenizer-model ${TOKENIZER_MODEL} \ +--load ${CHECKPOINT_DIR} \ +--exit-on-missing-checkpoint \ +--use-checkpoint-args \ +--no-load-optim \ +--no-load-rng \ +--untie-embeddings-and-output-weights \ +--normalization RMSNorm \ +--position-embedding-type rope \ +--no-masked-softmax-fusion \ +--attention-softmax-in-fp32 \ +--disable-bias-linear \ +--transformer-impl transformer_engine \ +--group-query-attention 8 \ +--attention-dropout 0.0 \ +--hidden-dropout 0.0 \ +--rotary-base 500000 \ +--rotary-percent 1.0 \ +--use-rope-scaling \ +--ffn-hidden-size 14336 \ +--num-attention-heads 32 \ +--swiglu \ +--bf16 \ +``` + +**Note:** If you converted to the legacy model format (i.e., `--saver legacy`), please see [here](#using-legacy-model-format). + +# Mistral-7b + +Megatron currently supports loading the v0.3 release of Mistral-7b (which does not use sliding window attention and offers a larger 32768 vocabulary) for inference and finetuning. Loading these checkpoints consists of several steps: + +1. Get access to download the checkpoints (weights and tokenizer). +2. Convert the checkpoints from HuggingFace format to Megatron format. +3. (Optional) Validate converted checkpoints +4. Setup arguments for launching the model. + +The following sections detail these steps. + +## Download Huggingface checkpoints + +Users must first apply for access to download the Mistral-7b checkpoints through [Huggingface](https://huggingface.co/mistralai/Mistral-7B-v0.3) (HF). + +## Convert checkpoint format + +The HF checkpoints can be converted to Megatron format by using Megatron's own Mistral checkpoint converter for HF format (see script `tools/checkpoint/loader_llama_mistral.py`). + +Using the path to the Mistral tokenizer model (downloaded alongside the HF checkpoint), run the following command from the root of your Megatron source code to convert from HF format to the Megatron core format: + +``` +$>: python tools/checkpoint/convert.py \ + > --bf16 \ + > --model-type GPT \ + > --loader llama_mistral \ + > --saver core \ + > --target-tensor-parallel-size ${TP} \ + > --checkpoint-type hf \ + > --load-dir ${HF_FORMAT_DIR} \ + > --save-dir ${MEGATRON_FORMAT_DIR} \ + > --tokenizer-model ${TOKENIZER_MODEL} \ + > --model-size mistral \ +``` + +After this conversion, we are ready to load the checkpoints into a Megatron core GPT model. 
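+
+For concreteness, the variables in the conversion command above might be set along these lines; all paths are illustrative, and `TP=1` simply mirrors the 7B rows of the tensor-parallel tables earlier in this document:
+
+```
+TP=1
+HF_FORMAT_DIR=/checkpoints/hf/Mistral-7B-v0.3
+MEGATRON_FORMAT_DIR=/checkpoints/megatron/mistral-7b-mcore-tp1
+TOKENIZER_MODEL=${HF_FORMAT_DIR}/tokenizer.model   # point at the tokenizer file shipped with the HF checkpoint
+```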
+
+## (Optional) Validate checkpoints
+
+A Megatron-LM text generation server for Mistral-7B can be launched using the script `examples/inference/llama_mistral/run_text_generation_mistral.sh`.
+
+Once running, query the server with `curl 'http://<server address>:5000/api' -X 'PUT' -H 'Content-Type: application/json; charset=UTF-8' -d '{"prompts":["<prompt>"], "tokens_to_generate":100, "top_k":1}'`.
+
+A reference generation for comparison can be obtained from the Huggingface transformers library by running `python examples/inference/llama_mistral/huggingface_reference.py --model_path <path to Huggingface model> --prompt <prompt>`.
+
+## Launch model
+
+If loading for either inference or finetuning, use the following arguments:
+
+```
+--tensor-model-parallel-size ${TP} \
+--pipeline-model-parallel-size 1 \
+--seq-length 4096 \
+--max-position-embeddings 4096 \
+--tokenizer-type HuggingFaceTokenizer \
+--tokenizer-model ${TOKENIZER_MODEL} \
+--load ${CHECKPOINT_DIR} \
+--exit-on-missing-checkpoint \
+--use-checkpoint-args \
+--no-load-optim \
+--no-load-rng \
+--untie-embeddings-and-output-weights \
+--normalization RMSNorm \
+--position-embedding-type rope \
+--no-masked-softmax-fusion \
+--attention-softmax-in-fp32 \
+--apply-layernorm-1p \
+--transformer-impl transformer_engine \
+--group-query-attention 8 \
+--disable-bias-linear \
+--rotary-base 1000000 \
+--rotary-percent 1.0 \
+--swiglu \
+--ffn-hidden-size 14336 \
+--num-attention-heads 32
+```
+
+**Note:** If you converted to the legacy model format (i.e., `--saver legacy`), please see [here](#using-legacy-model-format).
+
+# Other Llama-like model support
+
+*Note: Experimental*
+
+Many models such as Yi-34B and Qwen2.x use the Llama architecture and may be converted from HuggingFace to Megatron using the commands in [Llama-3.x](#llama-3x).
+
+# Known numerical differences
+
+It is not expected that the Megatron and Huggingface implementations of the Llama-3.x and Mistral models will produce numerically identical results. There are multiple points where small numerical differences are expected. This is a non-exhaustive list:
+
+1. TransformerEngine (TE) uses the model's params_dtype inside RMSNorm, whereas the Huggingface implementation uses fp32; see https://github.com/NVIDIA/TransformerEngine/issues/1132 for details.
+2. Huggingface `transformers` implements the q, k and v projections in self-attention as separate GEMMs, whereas Megatron core combines them into a single GEMM for efficiency. This leads to small numerical differences.
+
+# Using legacy model format
+
+In all of the checkpoint conversion examples in this document, the saver format `--saver core` is used, meaning that the newer (and recommended) Megatron GPT model class is used, i.e.:
+
+- old class: `megatron.legacy.model.gpt_model.GPTModel`
+- new class: `megatron.core.models.gpt.gpt_model.GPTModel`
+
+Using this new format is the recommended approach.
However, if your use case requires using the older class (i.e., convert using `--saver legacy`), then when launching training or finetuning, the following args must be added:
+
+- `--use-legacy-models`: use the older model class
+- `--ckpt-format torch`: use the `torch` checkpoint format, which is the only checkpoint format that is compatible with the legacy model format
diff --git a/docs/source/api-guide/context_parallel.rst b/docs/source/api-guide/context_parallel.rst
new file mode 100644
index 0000000000..c08defd210
--- /dev/null
+++ b/docs/source/api-guide/context_parallel.rst
@@ -0,0 +1,35 @@
+context\_parallel package
+=========================
+
+Context parallelism overview
+----------------------------
+
+.. figure:: ../images/context_parallel/CP_overview.png
+   :alt: cp_overview
+   :align: center
+
+   Figure 1: A transformer layer running with TP2CP2. Communications next to Attention are for CP, others are for TP. (AG/RS: all-gather in forward and reduce-scatter in backward, RS/AG: reduce-scatter in forward and all-gather in backward, /AG: no-op in forward and all-gather in backward).
+
+Context Parallelism ("CP") is a parallelization scheme on the dimension of sequence length. Unlike prior SP (sequence parallelism), which only splits the sequence of Dropout and LayerNorm activations, CP partitions the network inputs and all activations along the sequence dimension. With CP, all modules except attention (e.g., Linear, LayerNorm, etc.) can work as usual without any changes, because they do not have inter-token operations. As for attention, the Q (query) of each token needs to compute attention with the KV (key and value) of all tokens in the same sequence. Hence, CP requires an additional all-gather across GPUs to collect the full sequence of KV. Correspondingly, a reduce-scatter should be applied to the activation gradients of KV in backward propagation. To reduce the activation memory footprint, each GPU only stores the KV of a sequence chunk in the forward pass and gathers KV again in the backward pass. KV communication happens between a GPU and its counterparts in other TP groups. Under the hood, the all-gather and reduce-scatter are transformed into point-to-point communications in a ring topology. Exchanging KV can also leverage MQA/GQA to reduce communication volumes, as they have only one or a few attention heads for KV.
+
+For example, in Figure 1, assuming the sequence length is 8K, each GPU processes 4K tokens. GPU0 and GPU2 compose a CP group and exchange KV with each other; the same happens between GPU1 and GPU3. CP is similar to `Ring Attention <https://arxiv.org/abs/2310.01889>`_ but provides better performance by (1) leveraging the latest OSS and cuDNN flash attention kernels, and (2) removing the unnecessary computation resulting from lower-triangular causal masking and achieving optimal load balance among GPUs.
+
+Context parallelism benefits
+----------------------------
+
+.. figure:: ../images/context_parallel/CP_results.png
+   :alt: cp_results
+   :align: center
+
+   Figure 2: Speedup of 175B GPT with various TP+CP combinations vs. full recompute (i.e., TP8CP1).
+
+LLMs encounter OOM (out of memory) issues with long context (i.e., long sequence length) because of the linearly increasing memory footprint of activations. Recomputing activations in the backward pass can avoid OOM but also introduces significant overhead (~30% with full recompute). Enlarging TP (tensor model parallelism) can fix the OOM issue as well, but it potentially makes the compute (e.g., Linear) too short to overlap communication latencies.
To be clear, scaling out to more GPUs with a bigger TP can hit this overlapping problem regardless of whether OOM happens.
+
+CP addresses these issues better. With CP, each GPU only computes on a part of the sequence, which reduces both computation and communication by CP times, so there is no concern about overlapping between them. The activation memory footprint per GPU is also CP times smaller, hence no more OOM issues. As Figure 2 shows, combinations of TP and CP can achieve optimal performance by eliminating recompute overheads and making the best tradeoff between computation and communication.
+
+Enabling context parallelism
+----------------------------
+
+CP support has been added to GPT. All models that share the GPT code path, such as Llama, should also be able to benefit from CP. CP can work with TP (tensor model parallelism), PP (pipeline model parallelism), and DP (data parallelism), where the total number of GPUs equals TPxCPxPPxDP. CP can also work with different attention variants, including MHA/MQA/GQA and uni-directional and bi-directional masking.
+
+CP is enabled by simply setting `context_parallel_size=<CP_SIZE>` on the command line. The default context_parallel_size is 1, which means CP is disabled. Running with CP requires Megatron-Core (>=0.5.0) and Transformer Engine (>=1.1).
diff --git a/docs/source/api-guide/custom_fsdp.md b/docs/source/api-guide/custom_fsdp.md
new file mode 100644
index 0000000000..e265de8ae4
--- /dev/null
+++ b/docs/source/api-guide/custom_fsdp.md
@@ -0,0 +1,184 @@
+**NOTE: In M-Core 0.14, the custom FSDP refactored its checkpoint implementation to use DTensor-based torch distributed checkpointing. The custom FSDP was also renamed Megatron FSDP. The relevant sections of this document are no longer applicable.**
+
+# MCore Custom Fully Sharded Data Parallel (FSDP)
+
+## How to use?
+
+Add these flags to enable MCore custom FSDP:
+
+```bash
+--use-megatron-fsdp
+--data-parallel-sharding-strategy optim_grads_params
+--no-gradient-accumulation-fusion
+--use-distributed-optimizer
+```
+
+## Key Features
+
+- **Sharding Strategy**: Efficiently shards optimizer states, gradients, and parameters to reduce memory consumption.
+- **Communication and Computation Overlap**: Optimized to enable concurrent execution of communication and computation, enhancing overall efficiency.
+- **Supports automatic mixed precision training**: Compatible with BF16 O1/O2/O3 recipes, as well as FP8 compute with FP32 parameters and FP8 parameter training, allowing for flexible precision configurations.
+- **Tensor Parallelism (TP), Expert Parallelism (EP) and Context Parallelism (CP)**: Compatible with TP, EP and CP configurations, enabling efficient scaling of large language models.
+- **Distributed Model Initialization with Meta Device**: Allows model initialization using the meta device, followed by layer-by-layer initialization of distributed model weight buffers via the `Module.reset_parameters` API, facilitating the initialization of extremely large models.
+
+## Configuration Recommendations
+
+### 1. Disable `CUDA_DEVICE_MAX_CONNECTIONS`
+
+To ensure full parallelization of FSDP communication and computation, unset the CUDA_DEVICE_MAX_CONNECTIONS environment variable. This avoids potential bubbles in the CUDA stream (but may slow down TP and CP to some extent).
+
+```bash
+unset CUDA_DEVICE_MAX_CONNECTIONS
+```
+
+### 2. Add `--calculate-per-token-loss`
+
+For gradient sharding mode optimization, include the `--calculate-per-token-loss` flag in your training script.
This improves performance by reducing the frequency of gradient scaling, which is also a sizable drain on SM resources.
+
+## Design of Custom FSDP
+
+### 1. Overview
+
+The custom Fully Sharded Data Parallelism (FSDP) implementation in Megatron-Core is specifically designed to optimize memory consumption and performance for large language models. The core design principles include:
+
+ - **Optimized for Large Language Models**: This custom FSDP implementation is tailored to efficiently scale with models containing billions of parameters, ensuring seamless execution and training of massive models.
+ - **Efficient Memory Consumption**: By strategically sharding optimizer states, gradients, and model parameters, the custom FSDP significantly reduces memory usage. This approach enables the training of models that would otherwise be too large to fit in memory.
+ - **Efficient Workflow & Overlapping Communication and Computation**: The implementation is engineered to minimize the number of communication steps required during training. It maximizes the overlap between communication and computation, thereby enhancing overall training efficiency and reducing latency.
+ - **Support for MCore's Efficient Training Methods**: The custom FSDP seamlessly integrates with Megatron-Core's advanced parallelism techniques, including tensor parallelism, expert parallelism and context parallelism. Additionally, it supports automatic mixed precision training, further optimizing training performance and efficiency.
+
+The design of Custom FSDP draws inspiration from PyTorch FSDP [Zhao, Yanli, et al.](https://arxiv.org/pdf/2304.11277) and MCore's distributed optimizer. The introduction to PyTorch FSDP is quoted here to clarify the underlying concepts of the custom FSDP design:
+
+> In DistributedDataParallel (DDP) training, each process/worker owns a replica of the model and processes a batch of data; finally, it uses all-reduce to sum up gradients over different workers. In DDP the model weights and optimizer states are replicated across all workers. FSDP is a type of data parallelism that shards model parameters, optimizer states and gradients across DDP ranks.
+
+> When training with FSDP, the GPU memory footprint is smaller than when training with DDP across all workers. This makes the training of some very large models feasible by allowing larger models or batch sizes to fit on device. This comes with the cost of increased communication volume. The communication overhead is reduced by internal optimizations like overlapping communication and computation.
+
+![FSDP workflow](../images/custom_fsdp/FSDP_workflow.png)
+
+*Notice that the unit processed in the workflow here is “FSDP instance 1: N layers”, where an FSDP instance is the smallest FSDP processing unit (also a PyTorch module). This means that we can safely release this module's weights after using it (executing the forward or backward of this module), and there will be no other computations relying on these weights. This capability is the foundation of FSDP's layer-by-layer execution and memory-saving strategy. An FSDP instance is also referred to as an **FSDP Unit**.*
+
+*It is worth noting that an FSDP instance can correspond to multiple FSDP parameter groups. These groups are separated by Data Parallel (DP) communication groups and the data type of the parameter or gradient. Consequently, an FSDP instance may require several parameter-gather tasks before execution (forward or backward).
Each **FSDP parameter group** corresponds to one **Data Parallel Buffer** in custom FSDP.* + +At a high level FSDP works as follow: + +In constructor + - Shard model parameters and each rank only keeps its own shard + +In forward path + - Run all_gather to collect all shards from all ranks to recover the full parameter in this FSDP unit + - Run forward computation + - Discard parameter shards it has just collected + +In backward path + - Run all_gather to collect all shards from all ranks to recover the full parameter in this FSDP unit + - Run backward computation + - Run reduce_scatter to sync gradients + - Discard parameters. + +One way to view FSDP’s sharding is to decompose the DDP gradient all-reduce into reduce-scatter and all-gather. Specifically, during the backward pass, FSDP reduces and scatters gradients, ensuring that each rank possesses a shard of the gradients. Then it updates the corresponding shard of the parameters in the optimizer step. Finally, in the subsequent forward pass, it performs an all-gather operation to collect and combine the updated parameter shards. + +![FSDP Allreduce](../images/custom_fsdp/FSDP_Allreduce.png) + +### 2. Custom FSDP underlying data structure + +To implement the FSDP functionality described above, the custom FSDP is designed with the following Python classes and data structure: + +![MCore Custom FSDP Class Diagram](../images/custom_fsdp/MCore_Custom_FSDP_Class_Diagram.png) + +### 3. The custom FSDP interface: FullyShardedDataParallel + +The custom FSDP provides the same programming interface as PyTorch's DistributedDataParallel (DDP) as FullyShardedDataParallel (FSDP). For example, you can apply FSDP to models as follows: + +```python +# Initialize model and optimizer +ddp_config.use_megatron_fsdp = True +ddp_config.data_parallel_sharding_strategy = "optim_grads_params" +model = GPTModel(transformer_config) +model = FullyShardedDataParallel( + transformer_config, + model, + ddp_config, + fsdp_unit_modules = [TransformerLayer, LanguageModelEmbedding], +) +optimizer = torch.optim.AdamW(model.parameters(), lr=lr) +optimizer = DistributedOptimizer(optimizer, [model], [model.param_and_grad_buffer]) + +# Training loop +def train_step(inputs, labels): + optimizer.zero_grad() + for mbs_input, mbs_label in zip(inputs, labels): + outputs = model(mbs_input) + loss = loss_fn(outputs, mbs_label) + loss.backward() + optimizer.step() + +# Save and load model and optimizer state dict +def model_and_optimizer_state_dict(): + state_dict = { + "model": model.sharded_state_dict(), + "optimizer": optimizer.sharded_state_dict(), + } + return state_dict + +def load_model_and_optimizer_state_dict(state_dict): + model.load_state_dict(state_dict["model"]) + optimizer.load_state_dict(state_dict["optimizer"]) +``` + +**Key Notes:** + - You can configure which modules should be treated as FSDP units via the `fsdp_unit_modules` argument. This configuration is mandatory. + - The custom FSDP must be used with a distributed optimizer since it provides distributed checkpointing. + - The data-parallel communication group for parameters is not explicitly shown. Custom FSDP configures these groups as either DP (data-parallel) or EDP (expert data-parallel) based on parameter markings. + +#### 3.1 Initializing Models on the Meta Device + +For training particularly large models with FSDP, you can initialize the model on the meta device. Using PyTorch's `reset_parameters` API, you can initialize model weights layer by layer during the construction of the `ParamAndGradBuffer`. 
Most PyTorch native modules and TransformerEngine modules support this API (e.g., [PyTorch Linear](https://github.com/pytorch/pytorch/blob/v2.6.0/torch/nn/modules/linear.py#L114), [TE LayerNormLinear](https://github.com/NVIDIA/TransformerEngine/blob/release_v2.0/transformer_engine/pytorch/module/layernorm_linear.py#L1107)). + +```python +# Initialize model on meta device +with torch.device("meta"): + model = GPTModel(config) + +model = FullyShardedDataParallel( + transformer_config, + model, + ddp_config, + fsdp_unit_modules=[TransformerLayer, LanguageModelEmbedding], +) +``` + +**Important Considerations:** +1. *Custom Modules*: If your model contains custom modules, ensure they implement the `reset_parameters` API. Otherwise, you may need to force parameter initialization on a CUDA or CPU device. +2. *Tensor Initialization*: Be cautious of tensors created during model initialization without a specified device—they will default to the meta device. To avoid issues, explicitly specify the device for these tensors to ensure compatibility with this function. + +### 4. Interaction between Custom FSDP and Model Forward/Backward Propagation + +Custom FSDP implements Fully Sharded Data Parallelism (FSDP) through a series of module hooks, gradient hooks, or by adding functions between modules. This involves inserting communications and manipulating parameters and gradients during PyTorch's module forward or backward propagation. + +Module hooks summary: +- Module pre-forward hook(`module.register_forward_pre_hook`): This hook unshards model weights before the forward pass. In the case of an FSDP Unit Module, add a RegisterFSDPBackwardFunction function that will reshard model weights and reduce gradients after module backward propagation. +- Module post-forward hook(`module.register_forward_hook`): This hook is used to reshard model weights after the forward pass. +- Root module pre-backward hook(`root_module.register_full_backward_pre_hook`): This hook checks that all model parameters are resharded, in order to avoid unnecessary memory spikes. It also marks all modules as being in the `TrainingState.PRE_BACKWARD` state. +- Module pre-backward hook(`module.register_full_backward_pre_hook`): This hook is used to unshard the model weights before the backward pass. +- Root module post-backward hook(`torch.autograd.Variable._execution_engine.queue_callback`): This hook is used to make sure all gradients in the backprop are properly handled / available. + +The gradient reduction pipeline maintains a map of gradients to FSDP parameter groups. If all gradients in an FSDP parameter group are ready, it launches a gradient reduction. Note that this assumes that the model's gradients are always generated in a certain order (reverse of `module.parameters()`), as otherwise, FSDP would maintain too many parameter group grad buffers, leading to excessive memory usage. + +#### 4.1 Optimized for Activation Recompute + +Using the activation recompute will cause the same module to execute the forward function first and then the backward function in the backward prop, which will cause model weights unshard twice and model weights reshard twice. If we can tell program that this is a forward + backward operation, we can just call unshard once and reshard once. + +To make this determination, we keep track of the model's state with training_state, `FORWARD`, `PRE_BACKWARD`, `POST_BACKWARD`, `IDLE`. 
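A minimal sketch of how such state tracking can gate the unshard/reshard calls is shown below; the class and method names are illustrative only and do not correspond to the actual Megatron-Core implementation.

```python
# Simplified, illustrative sketch of the state bookkeeping described above.
# Class and method names do not correspond to the actual Megatron-Core code.
from enum import Enum, auto

class TrainingState(Enum):
    IDLE = auto()
    FORWARD = auto()
    PRE_BACKWARD = auto()
    POST_BACKWARD = auto()

class FSDPUnit:
    """One FSDP unit module whose hooks consult the training state."""

    def __init__(self):
        self.state = TrainingState.IDLE

    def pre_backward_hook(self):
        # Fires first under activation recompute: unshard once, then mark the state.
        self._unshard_params()
        self.state = TrainingState.PRE_BACKWARD

    def pre_forward_hook(self):
        # The recompute forward sees PRE_BACKWARD and skips the duplicate all-gather.
        if self.state is not TrainingState.PRE_BACKWARD:
            self._unshard_params()
            self.state = TrainingState.FORWARD

    def post_forward_hook(self):
        # Keep weights unsharded if the backward pass will need them right away.
        if self.state is not TrainingState.PRE_BACKWARD:
            self._reshard_params()
            self.state = TrainingState.IDLE

    def _unshard_params(self):
        ...  # all-gather the parameter shards into a full weight buffer

    def _reshard_params(self):
        ...  # free the gathered full weights, keeping only the local shard
```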
It's worth noting that the pre-backward hook acts before the pre-forward hook; we let the pre-backward hook execute the model weight unshard and then mark the model as `PRE_BACKWARD`, so when the pre-forward hook sees this marking it does not perform the unshard operation again. Similarly, to avoid a duplicate model weight reshard, the post-forward hook acts before the post-backward function, and checking for the `PRE_BACKWARD` flag in the post-forward hook cancels the reshard.
+
+### 5. Memory Mechanisms and Features of Custom FSDP
+
+FSDP can fully distribute the model parameters, gradients, and optimizer states, and for mixed-precision training, it can also fully distribute the high-precision main weights. This distributes nearly all of the memory except for the activation memory, but FSDP still faces some memory issues.
+
+FSDP frequently unshards and reshards model weights, which can lead to busy memory allocation and deallocation. This results in untimely tensor releases, causing memory spikes (or even out-of-memory errors), crashes of the PyTorch memory allocator cache, and a large number of `cudaMalloc` and `cudaFree` calls. These issues can significantly slow down the system.
+
+The problem of untimely tensor release can generally be addressed using the `tensor._typed_storage()._resize_(0)` API, which immediately deallocates the storage's memory. Custom FSDP provides interfaces in `AllGatherPipeline` and `GradReducePipeline` to replace the temporary buffer memory allocator used for parameter gathering and gradient reduction with `StorageResizeBasedBucketAllocator`. This replaces the tensor release operation with the `tensor._typed_storage()._resize_(0)` API.
+
+The PyTorch memory allocator cache crash is a complex issue that occurs frequently when the actual memory usage approaches the GPU memory limit, leading to poor performance. This problem is challenging and can only be mitigated by avoiding frequent hits on the GPU memory limit. Using a self-managed memory allocator like `RotaryBucketAllocator` is another potential solution. However, note that `RotaryBucketAllocator` is not yet mature.
+
+## References
+
+- [Getting Started with Fully Sharded Data Parallel (FSDP)](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html)
diff --git a/docs/source/api-guide/datasets.rst b/docs/source/api-guide/datasets.rst
new file mode 100644
index 0000000000..247a3f07d3
--- /dev/null
+++ b/docs/source/api-guide/datasets.rst
@@ -0,0 +1,104 @@
+datasets package
+================
+
+.. mdinclude :: ../../../megatron/core/datasets/readme.md
+
+Submodules
+----------
+
+datasets.blended\_megatron\_dataset\_config module
+---------------------------------------------------
+
+.. automodule:: core.datasets.blended_megatron_dataset_config
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+datasets.blended\_megatron\_dataset\_builder module
+---------------------------------------------------
+
+.. automodule:: core.datasets.blended_megatron_dataset_builder
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+datasets.megatron\_tokenizer module
+-----------------------------------
+
+.. automodule:: core.datasets.megatron_tokenizer
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+datasets.indexed\_dataset module
+--------------------------------
+
+.. automodule:: core.datasets.indexed_dataset
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+datasets.megatron\_dataset module
+---------------------------------
+
+..
automodule:: core.datasets.megatron_dataset + :members: + :undoc-members: + :show-inheritance: + +datasets.gpt\_dataset module +---------------------------- + +.. automodule:: core.datasets.gpt_dataset + :members: + :undoc-members: + :show-inheritance: + +datasets.masked\_dataset module +------------------------------- + +.. automodule:: core.datasets.masked_dataset + :members: + :undoc-members: + :show-inheritance: + +datasets.bert\_dataset module +----------------------------- + +.. automodule:: core.datasets.bert_dataset + :members: + :undoc-members: + :show-inheritance: + +datasets.t5\_dataset module +--------------------------- + +.. automodule:: core.datasets.t5_dataset + :members: + :undoc-members: + :show-inheritance: + +datasets.blended\_dataset module +---------------------------------- + +.. automodule:: core.datasets.blended_dataset + :members: + :undoc-members: + :show-inheritance: + +datasets.utils module +--------------------- + +.. automodule:: core.datasets.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: core.datasets + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/source/api-guide/dist_checkpointing.rst b/docs/source/api-guide/dist_checkpointing.rst new file mode 100644 index 0000000000..3b683b0b3b --- /dev/null +++ b/docs/source/api-guide/dist_checkpointing.rst @@ -0,0 +1,99 @@ +dist\_checkpointing package +=========================== + +A library for saving and loading the distributed checkpoints. +A "distributed checkpoint" can have various underlying formats (current default format is based on Zarr) +but has a distinctive property - the checkpoint saved in one parallel configuration (tensor/pipeline/data parallelism) +can be loaded in a different parallel configuration. + +Using the library requires defining sharded state_dict dictionaries with functions from *mapping* and *optimizer* modules. +Those state dicts can be saved or loaded with a *serialization* module using strategies from *strategies* module. + +Safe Checkpoint Loading +----------------------- + +Since **PyTorch 2.6**, the default behavior of `torch.load` is `weights_only=True`. +This ensures that only tensors and allow-listed classes are loaded, reducing the risk of arbitrary code execution. + +If you encounter an error such as: + +.. code-block:: bash + + WeightsUnpickler error: Unsupported global: GLOBAL argparse.Namespace was not an allowed global by default. + +you can fix it by explicitly allow-listing the missing class in your script: + +.. code-block:: python + + import torch, argparse + + torch.serialization.add_safe_globals([argparse.Namespace]) + + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + dist_checkpointing.strategies + +Submodules +---------- + +dist\_checkpointing.serialization module +---------------------------------------- + +.. automodule:: core.dist_checkpointing.serialization + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.mapping module +---------------------------------- + +.. automodule:: core.dist_checkpointing.mapping + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.optimizer module +------------------------------------ + +.. automodule:: core.dist_checkpointing.optimizer + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.core module +------------------------------- + +.. 
automodule:: core.dist_checkpointing.core + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.dict\_utils module +-------------------------------------- + +.. automodule:: core.dist_checkpointing.dict_utils + :members: + :undoc-members: + :show-inheritance: + + +dist\_checkpointing.utils module +-------------------------------- + +.. automodule:: core.dist_checkpointing.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: core.dist_checkpointing + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/dist_checkpointing.strategies.rst b/docs/source/api-guide/dist_checkpointing.strategies.rst new file mode 100644 index 0000000000..41e674c761 --- /dev/null +++ b/docs/source/api-guide/dist_checkpointing.strategies.rst @@ -0,0 +1,50 @@ +dist\_checkpointing.strategies package +====================================== + +Package defining different checkpoint formats (backends) and saving/loading algorithms (strategies). + +Strategies can be used for implementing new checkpoint formats or implementing new (more optimal for a given use case) ways of saving/loading of existing formats. +Strategies are passed to `dist_checkpointing.load` and `dist_checkpointing.save` functions and control the actual saving/loading procedure. + +Submodules +---------- + +dist\_checkpointing.strategies.base module +------------------------------------------ + +.. automodule:: core.dist_checkpointing.strategies.base + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.strategies.tensorstore module +------------------------------------------------- + +.. automodule:: core.dist_checkpointing.strategies.tensorstore + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.strategies.two\_stage module +------------------------------------------------ + +.. automodule:: core.dist_checkpointing.strategies.two_stage + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.strategies.zarr module +------------------------------------------ + +.. automodule:: core.dist_checkpointing.strategies.zarr + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: core.dist_checkpointing.strategies + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/dist_optimizer.md b/docs/source/api-guide/dist_optimizer.md new file mode 100644 index 0000000000..34f42d5343 --- /dev/null +++ b/docs/source/api-guide/dist_optimizer.md @@ -0,0 +1,40 @@ +# Distributed Optimizer + +The motivation for the distributed optimizer is to save memory by distributing the optimizer state evenly across data parallel ranks (https://arxiv.org/abs/1910.02054), versus the naive method of replicating the optimizer state across data parallel ranks. + +Theoretical memory savings vary depending on the combination of the datatype of the model's parameters (`param_dtype`) and main gradients accumulated across data-parallel replicas (`grad_dtype`). We always use `fp32` main parameters for optimizer steps. 
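As a quick illustration of where the byte counts in the table below come from, the following sketch reproduces the `bf16` parameter / `fp32` gradient row; it assumes an Adam-style optimizer with two `fp32` state tensors per parameter, in addition to the `fp32` main parameters mentioned above.

```python
# Worked example (sketch) for the "bf16 parameters, fp32 gradients" row of the
# table below. Assumes an Adam-style optimizer with two fp32 state tensors per
# parameter, plus the fp32 main parameters mentioned above.
BF16_PARAM, FP32_GRAD = 2, 4                    # bytes, replicated on every DP rank
FP32_MAIN_PARAM, EXP_AVG, EXP_AVG_SQ = 4, 4, 4  # bytes, sharded by the distributed optimizer

def bytes_per_parameter(d):
    """Per-parameter memory with the distributed optimizer and data-parallel size d."""
    return (BF16_PARAM + FP32_GRAD) + (FP32_MAIN_PARAM + EXP_AVG + EXP_AVG_SQ) / d

print(bytes_per_parameter(1))  # 18.0 -> matches the non-distributed column
print(bytes_per_parameter(8))  # 7.5  -> "6 + 12/d" with d = 8
```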
In the current implementation, the theoretical number of bytes per parameter is (where d is the data parallel size):
+
+| | Non-distributed optim | Distributed optim |
+| ------ | ------ | ------ |
+| `fp16` parameters, `fp16` gradients | 20 | 4 + 16/d |
+| `bf16` parameters, `fp32` gradients | 18 | 6 + 12/d |
+| `fp32` parameters, `fp32` gradients | 16 | 8 + 8/d |
+
+Our implementation of the distributed optimizer uses contiguous buffers for parameters and main gradients; model gradients are copied over to the main gradients as soon as they are fully computed.
+
+The figures below illustrate the distributed optimizer's sharding scheme and the key steps of the distributed optimizer's parameter update:
+
+## Data flow
+
+![Data flow](../images/distrib_optimizer/data_flow.png)
+
+## Sharding scheme
+
+![Sharding scheme](../images/distrib_optimizer/sharding_scheme.png)
+
+## Key steps
+
+_(note: using the illustrations above, assuming `bf16` model weights, `bf16` model gradients that are computed by the backward pass, and `fp32` main gradients that are also used for optimizer steps; we always use `fp32` main weights for optimizer steps)_
+
+- Backward pass finishes (gradient buffer holds 16 `fp32` gradient elements).
+- Call reduce-scatter on each DP rank.
+- Each DP rank now has 4 elements within the gradient buffer that are fully reduced (the remaining 12 elements are garbage).
+  - DP rank 0 has gradient values for elements [0:4].
+  - DP rank 1 has gradient values for elements [4:8].
+  - DP rank 2 has gradient values for elements [8:12].
+  - DP rank 3 has gradient values for elements [12:16].
+- Optimizer.step().
+- Each DP rank copies its 4 `fp32` main parameter elements into the corresponding `bf16` parameter buffer (each element is cast from fp32 to bf16).
+- Call all-gather on each DP rank.
+- The parameter buffer now contains all 16 fully updated `bf16` model parameter elements. Parameters in PyTorch modules already point to the appropriate locations in this parameter buffer, and thus forward passes are ready to run after the all-gather completes.
+- At this point, the gradient buffer is also ready to be zeroed for the next iteration.
diff --git a/docs/source/api-guide/distributed.rst b/docs/source/api-guide/distributed.rst
new file mode 100644
index 0000000000..737820331c
--- /dev/null
+++ b/docs/source/api-guide/distributed.rst
@@ -0,0 +1,53 @@
+distributed package
+===================
+
+This package contains various utilities to finalize model weight gradients
+on each rank before the optimizer step. This includes a distributed data
+parallelism wrapper to all-reduce or reduce-scatter the gradients across
+data-parallel replicas, and a `finalize\_model\_grads` method to
+synchronize gradients across different parallelism modes (e.g., 'tied'
+layers on different pipeline stages, or gradients for experts in a MoE on
+different ranks due to expert parallelism).
+
+Submodules
+----------
+
+distributed.distributed\_data\_parallel
+---------------------------------------
+
+Model wrapper for distributed data parallelism. Stores gradients in a
+contiguous buffer, and supports the option of overlapping communication
+(all-reduce or reduce-scatter) with backprop computation by breaking up
+the full model's gradients into smaller buckets and running all-reduce /
+reduce-scatter on each bucket asynchronously.
+
+..
automodule:: core.distributed.distributed_data_parallel + :members: + :undoc-members: + :show-inheritance: + +distributed.finalize\_model\_grads +---------------------------------- + +Finalize model gradients for optimizer step across all used parallelism modes. +Synchronizes the all-reduce / reduce-scatter of model gradients across DP replicas, +all-reduces the layernorm gradients for sequence parallelism, embedding gradients +across first and last pipeline stages (if not tied), and expert gradients for expert +parallelism. + +.. automodule:: core.distributed.finalize_model_grads + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +Contains functionality to synchronize gradients across different ranks before +optimizer step. + +.. automodule:: core.distributed + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/fusions.rst b/docs/source/api-guide/fusions.rst new file mode 100644 index 0000000000..22782ca84e --- /dev/null +++ b/docs/source/api-guide/fusions.rst @@ -0,0 +1,65 @@ +fusions package +=============== + +This package provides modules that provide commonly fused +operations. Fusing operations improves compute efficiency by +increasing the amount of work done each time a tensor is read from +memory. To perform the fusion, modules in this either rely on PyTorch +functionality for doing just-in-time compilation +(i.e. `torch.jit.script` in older PyTorch versions of `torch.compile` +in recent versions), or call into custom kernels in external libraries +such as Apex or TransformerEngine. + +Submodules +---------- + +fusions.fused\_bias\_dropout module +----------------------------------- + +This module uses PyTorch JIT to fuse the bias add and dropout operations. Since dropout is not used during inference, different functions are used when in train mode and when in inference mode. + +.. automodule:: core.fusions.fused_bias_dropout + :members: + :undoc-members: + :show-inheritance: + +fusions.fused\_bias\_gelu module +-------------------------------- + +This module uses PyTorch JIT to fuse the bias add and GeLU nonlinearity operations. + +.. automodule:: core.fusions.fused_bias_gelu + :members: + :undoc-members: + :show-inheritance: + +fusions.fused\_layer\_norm module +--------------------------------- + +This module provides a wrapper around various fused LayerNorm implementation in Apex. + +.. automodule:: core.fusions.fused_layer_norm + :members: + :undoc-members: + :show-inheritance: + +fusions.fused\_softmax module +----------------------------- + +This module provides wrappers around variations of Softmax in Apex. + +.. automodule:: core.fusions.fused_softmax + :members: + :undoc-members: + :show-inheritance: + +fusions.fused\_cross\_entropy\_loss module +------------------------------------------ + +This module uses PyTorch JIT to fuse the cross entropy loss calculation and batches communication calls. + +.. automodule:: core.fusions.fused_cross_entropy + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/source/api-guide/index.rst b/docs/source/api-guide/index.rst new file mode 100644 index 0000000000..710a7caf4d --- /dev/null +++ b/docs/source/api-guide/index.rst @@ -0,0 +1,24 @@ +API Guide +========= + +.. 
toctree:: + :maxdepth: 4 + + models + tensor_parallel + context_parallel + pipeline_parallel + custom_fsdp + fusions + transformer + moe + dist_checkpointing + dist_optimizer + distributed + datasets + multi_latent_attention + num_microbatches_calculator + optimizer_param_scheduler + optimizer_cpu_offload + multi_token_prediction + tokenizers diff --git a/docs/source/api-guide/models.bert.rst b/docs/source/api-guide/models.bert.rst new file mode 100644 index 0000000000..1b562ce72c --- /dev/null +++ b/docs/source/api-guide/models.bert.rst @@ -0,0 +1,22 @@ +models.bert package +=================== +Useful package for training bert and bert like encoder only models. It optionally comes with a binary head that can be used for classification tasks . + +Submodules +---------- + +models.bert.bert\_model module +------------------------------ + +.. automodule:: core.models.bert.bert_model + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: core.models.bert + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/models.gpt.rst b/docs/source/api-guide/models.gpt.rst new file mode 100644 index 0000000000..31c4da6a9c --- /dev/null +++ b/docs/source/api-guide/models.gpt.rst @@ -0,0 +1,22 @@ +models.gpt package +================== +This is the implementation of the popular GPT model. It supports several features like model parallelization (Tensor Parallel, Pipeline Parallel, Data Parallel) , mixture of experts, FP8 , Distributed optimizer etc. We are constantly adding new features. So be on the lookout or raise an issue if you want to have something added. + +Submodules +---------- + +models.gpt.gpt\_model module +---------------------------- + +.. automodule:: core.models.gpt.gpt_model + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: core.models.gpt + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/models.rst b/docs/source/api-guide/models.rst new file mode 100644 index 0000000000..12c40e4f35 --- /dev/null +++ b/docs/source/api-guide/models.rst @@ -0,0 +1,21 @@ +models package +============== +This package contains most of the popular LLMs . Currently we have support for GPT, Bert, T5 and Retro . This is an ever growing list so keep an eye out. + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + models.gpt + models.t5 + models.bert + +Module contents +--------------- + +.. automodule:: core.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/models.t5.rst b/docs/source/api-guide/models.t5.rst new file mode 100644 index 0000000000..1cc3315682 --- /dev/null +++ b/docs/source/api-guide/models.t5.rst @@ -0,0 +1,21 @@ +models.t5 package +================= + +Submodules +---------- + +models.t5.t5\_model module +-------------------------- + +.. automodule:: core.models.T5.t5_model + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: core.models.T5 + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/moe.rst b/docs/source/api-guide/moe.rst new file mode 100644 index 0000000000..9afc01e080 --- /dev/null +++ b/docs/source/api-guide/moe.rst @@ -0,0 +1,4 @@ +Mixture of Experts package +========================== + +.. 
mdinclude :: ../../../megatron/core/transformer/moe/README.md
diff --git a/docs/source/api-guide/multi_latent_attention.rst b/docs/source/api-guide/multi_latent_attention.rst
new file mode 100644
index 0000000000..64e2da07d0
--- /dev/null
+++ b/docs/source/api-guide/multi_latent_attention.rst
@@ -0,0 +1,14 @@
+Multi-Latent Attention
+======================
+
+Multi-Latent Attention overview
+-------------------------------
+
+Multi-Latent Attention ("MLA") is an innovative attention mechanism introduced by the DeepSeek team that enhances the efficiency of attention computation by leveraging multiple latent spaces. This approach is particularly beneficial for large language models (LLMs), as it reduces the computational burden associated with traditional attention mechanisms. According to the DeepSeek-V2 technical report, MLA achieves better performance than Multi-Head Attention (MHA) and requires a smaller KV cache.
+
+Enabling Multi-Latent Attention
+-------------------------------
+
+To enable MLA in Megatron-LM, set the following flags on the command line:
+- `--multi-latent-attention` to enable MLA.
+- Use `MLATransformerConfig` to configure MLA.
diff --git a/docs/source/api-guide/multi_token_prediction.md b/docs/source/api-guide/multi_token_prediction.md
new file mode 100644
index 0000000000..4059fa5326
--- /dev/null
+++ b/docs/source/api-guide/multi_token_prediction.md
@@ -0,0 +1,23 @@
+# Multi-Token Prediction (MTP)
+
+Multi-Token Prediction (MTP) extends the prediction scope to multiple future tokens at each position. On the one hand, an MTP objective densifies the training signals and may improve
+data efficiency. On the other hand, MTP may enable the model to pre-plan its representations for better prediction of future tokens. In this implementation of MTP, we sequentially predict additional tokens and keep the complete causal chain at each prediction depth. The following figure illustrates our implementation of MTP in [DeepSeek-V3](https://github.com/deepseek-ai/DeepSeek-V3/).
+
+![MTP_implementation](../images/multi_token_prediction/MTP_implementation.png)
+
+The k-th MTP module consists of a shared embedding layer, a projection matrix, a Transformer block, and a shared output head. For the i-th input token at the (k - 1)-th prediction depth, we first combine the representation of the i-th token and the embedding of the (i + k)-th token with a linear projection. The combined representation serves as the input of the Transformer block at the k-th depth to produce the output representation.
+
+For more information, please refer to the [DeepSeek-V3 Technical Report](https://github.com/deepseek-ai/DeepSeek-V3/blob/main/DeepSeek_V3.pdf).
+
+## Related Arguments
+
+We can train GPTModel-like models with Multi-Token Prediction (MTP) by setting `mtp_num_layers` to a positive integer.
+
+| Item | Description |
+| --- | --- |
+| mtp_num_layers | Number of Multi-Token Prediction (MTP) Layers. MTP extends the prediction scope to multiple future tokens at each position. This MTP implementation sequentially predicts additional tokens by using D sequential modules to predict D additional tokens. Default is None. |
+| mtp_loss_scaling_factor | Scaling factor of Multi-Token Prediction (MTP) loss. We compute the average of the MTP losses across all depths, and multiply it by the scaling factor to obtain the overall MTP loss, which serves as an additional training objective. Default is 0.1.
| + +## Precautions + +Please do not use Context Parallel (CP), or arbitrary AttnMaskType, or learned absolute position embedding type with MTP. These use cases are not yet supported. diff --git a/docs/source/api-guide/num_microbatches_calculator.rst b/docs/source/api-guide/num_microbatches_calculator.rst new file mode 100644 index 0000000000..4790b31749 --- /dev/null +++ b/docs/source/api-guide/num_microbatches_calculator.rst @@ -0,0 +1,12 @@ +Microbatches Calculator +======================= +This api is used to calculate the number of microbatches required to fit a given model on a given batch size. + + +Module contents +--------------- + +.. automodule:: core.num_microbatches_calculator + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/optimizer_cpu_offload.rst b/docs/source/api-guide/optimizer_cpu_offload.rst new file mode 100644 index 0000000000..fdbae6654b --- /dev/null +++ b/docs/source/api-guide/optimizer_cpu_offload.rst @@ -0,0 +1,4 @@ +Optimizer CPU offload package +============================== + +.. mdinclude :: ../../../megatron/core/optimizer/cpu_offloading/README.md diff --git a/docs/source/api-guide/optimizer_param_scheduler.rst b/docs/source/api-guide/optimizer_param_scheduler.rst new file mode 100644 index 0000000000..caf5d8abfb --- /dev/null +++ b/docs/source/api-guide/optimizer_param_scheduler.rst @@ -0,0 +1,12 @@ +Optimizer Parameters Scheduler +============================== +This api is used to calculate the learning rate and weight decay for the optimizer. + + +Module contents +--------------- + +.. automodule:: core.optimizer_param_scheduler + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/pipeline_parallel.rst b/docs/source/api-guide/pipeline_parallel.rst new file mode 100644 index 0000000000..0c1909d903 --- /dev/null +++ b/docs/source/api-guide/pipeline_parallel.rst @@ -0,0 +1,49 @@ +pipeline\_parallel package +========================== + +This package contains implementations for two different pipeline parallelism +schedules (one without interleaving and one with interleaving, see `Efficient +Large-Scale Language Model Training on GPU Clusters Using Megatron-LM `_ +for details), and a default no-pipelining schedule. It also contains methods +for the point-to-point communication that is needed between pipeline stages. + +Submodules +---------- + +.. mdinclude:: pipeline_parallel_layout.md + +pipeline\_parallel.p2p\_communication module +-------------------------------------------- + +Contains implementations for the various point-to-point communication needed +(e.g., `recv_forward` and `recv_backward`) in the different pipeline parallelism +schedules. + +.. automodule:: core.pipeline_parallel.p2p_communication + :members: + :undoc-members: + :show-inheritance: + +pipeline\_parallel.schedules module +----------------------------------- + +Contains implementations for two pipeline parallelism schedules +(`forward_backward_pipelining_with_interleaving`for pipeline parallelism with +interleaving, `forward_backward_pipelining_without_interleaving` for pipeline +parallelism without interleaving) and a default no-pipelining schedule +(`forward_backward_no_pipelining`). `get_forward_backward_func` returns the right +scheduling function to use based on the configuration being trained +(e.g., if pipeline-parallel size is 1, use `forward_backward_no_pipelining`). + +.. 
automodule:: core.pipeline_parallel.schedules + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: core.pipeline_parallel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/pipeline_parallel_layout.md b/docs/source/api-guide/pipeline_parallel_layout.md new file mode 100644 index 0000000000..30c8ce1a50 --- /dev/null +++ b/docs/source/api-guide/pipeline_parallel_layout.md @@ -0,0 +1,26 @@ +# Custom Pipeline Model Parallel Layout + +*This is an experimental feature and may be changed.* + +`--pipeline-model-parallel-layout` is a flexible API for defining the pipeline parallel partitioning, which is essential for balanced partitioning for an imbalanced model. For example, to partition DeepSeek-V3 (61 decoder layers + 1 mtp layer) with PP16VPP2, we can include the arguments as follows: + +```bash +--pipeline-model-parallel-size 16 +--pipeline-model-parallel-layout "Et*3|(tt|)*29,m|L" +``` + +| PP \ VPP rank | 0 | 1 | +|---------------|-------------------------|---------------| +| 0 | embedding + 3 × decoder | 2 × decoder | +| 1~13 | 2 × decoder | 2 × decoder | +| 14 | 2 × decoder | mtp | +| 15 | 2 × decoder | loss | + +In the layout string, stages are split by '|'. Replicated stages or layers can be described with multiplication. Commas can be used cosmetically. Symbol choices: + +* `E` = embedding layer +* `t` = transformer decoder layer +* `m` = MTP layer +* `L` = loss calculation layer + +Note that it is legal to have empty stages, e.g., `E||t|L` (the second stage is empty). diff --git a/docs/source/api-guide/tensor_parallel.rst b/docs/source/api-guide/tensor_parallel.rst new file mode 100644 index 0000000000..d8ae9dea22 --- /dev/null +++ b/docs/source/api-guide/tensor_parallel.rst @@ -0,0 +1,67 @@ +tensor\_parallel package +======================== + +This package contains an implementation for tensor parallelism in transformer +models (see `Megatron-LM: Training Multi-Billion Parameter Language Models +Using Model Parallelism `_ and `Reducing +Activation Recomputation in Large Transformer Models `_ +for details). + +Submodules +---------- + +tensor\_parallel.cross\_entropy module +-------------------------------------- + +.. automodule:: core.tensor_parallel.cross_entropy + :members: + :undoc-members: + :show-inheritance: + +tensor\_parallel.data module +---------------------------- + +.. automodule:: core.tensor_parallel.data + :members: + :undoc-members: + :show-inheritance: + +tensor\_parallel.layers module +------------------------------ + +.. automodule:: core.tensor_parallel.layers + :members: + :undoc-members: + :show-inheritance: + +tensor\_parallel.mappings module +-------------------------------- + +.. automodule:: core.tensor_parallel.mappings + :members: + :undoc-members: + :show-inheritance: + +tensor\_parallel.random module +------------------------------ + +.. automodule:: core.tensor_parallel.random + :members: + :undoc-members: + :show-inheritance: + +tensor\_parallel.utils module +----------------------------- + +.. automodule:: core.tensor_parallel.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: core.tensor_parallel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api-guide/tokenizers.md b/docs/source/api-guide/tokenizers.md new file mode 100644 index 0000000000..5aaf9866f1 --- /dev/null +++ b/docs/source/api-guide/tokenizers.md @@ -0,0 +1,137 @@ +# New Tokenizer System + +## Key Differences from the Old Tokenizer System + +### 1. Hugging Face–style API + +We now have a `MegatronTokenizer` class that provides a familiar, simple API similar to Hugging Face’s: + +`.from_pretrained()` – Load a tokenizer from a directory or file, automatically detecting the type and settings. + +`.write_metadata()` – Save tokenizer configuration (metadata) so that it can be reused without re-specifying parameters. + +This eliminates the need for long initialization arguments and hard-coded settings in training scripts. + +### 2. Tokenizer Metadata + +A metadata file (JSON) now stores all essential tokenizer configuration in one place: + - Tokenizer library (e.g., HuggingFace, SentencePiece, TikToken, etc.) + - Chat templates + - Tokenizer class + +Benefits: + - You only need to set these parameters once. + - No more passing multiple CLI arguments for tokenizer settings. + - Easy sharing — just copy the tokenizer directory with its metadata file. + +### 3. Library Classes Are Now Internal + +In the old system, you had to know which tokenizer library to use (`SentencePieceTokenizer`, `HuggingFaceTokenizer`, etc.) and instantiate it manually. + +In the new system: + - The library is automatically detected from the metadata. + - The correct tokenizer implementation is chosen under the hood. + - Users don’t need to manually manage tokenizer classes. + +### 3. Support for Model-specific Tokenizer Classes + +The system now supports: + - Built-in LLM-specific tokenizers. + - Custom tokenizers: You can create your own tokenizer class by inheriting from `MegatronTokenizerText` and specify it in the `tokenizer_class` field in the metadata file. + - This allows advanced customization while keeping defaults simple for most users. + +### 4. Usage + +**Creating and Saving Metadata** + +```python +from megatron.core.tokenizers import MegatronTokenizer + +# The metadata will be stored as a file named tokenizer_metadata.json inside the tokenizer’s directory. +MegatronTokenizer.write_metadata( + tokenizer_path="/path/to/tokenizer.model", + tokenizer_library="sentencepiece", + chat_template="chat template in jinja format", +) + +# To use custom tokenizer class +from megatron.core.tokenizers.text import MegatronTokenizerText + +class CustomTokenizer(MegatronTokenizerText): + ... 
+ +MegatronTokenizer.write_metadata( + tokenizer_path="/path/to/tokenizer.model", + tokenizer_library="sentencepiece", + chat_template="chat template in jinja format", + tokenizer_class=CustomTokenizer, +) + +# To save metadata to another dir +MegatronTokenizer.write_metadata( + tokenizer_path="/path/to/tokenizer.model", + tokenizer_library="sentencepiece", + metadata_path="/path/to/save/metadata.json", +) + +``` + +**Restoring the tokenizer** + +```python +from megatron.core.tokenizers import MegatronTokenizer + +MegatronTokenizer.from_pretrained( + tokenizer_path="/path/to/tokenizer.model", +) + +# If metadata is not in tokenizer’s dir +MegatronTokenizer.from_pretrained( + tokenizer_path="/path/to/tokenizer.model", + metadata_path="/path/to/metadata.json", +) + +# Pass metadata as dict +MegatronTokenizer.from_pretrained( + tokenizer_path="GPT2BPETokenizer", + metadata_path={"library": "megatron"}, + vocab_file="/path/to/vocab.txt", +) + +# Pass additional params +MegatronTokenizer.from_pretrained( + tokenizer_path="/path/to/tokenizer/model.json", + metadata_path={"library": "tiktoken"}, + pattern="v2", + num_special_tokens=1000, +) + +# Null tokenzier +MegatronTokenizer.from_pretrained( + metadata_path={"library": "null"}, + vocab_size=131072, +) + +``` + +### 4. Megatron-LM pretraining compatibility + +New tokenizer system is compatible with megatron-lm pretrain script. If `--tokenizer-metadata` is not specified, a default metadata file will be generated automatically. + +```bash +# Null tokenizer +torchrun --nproc_per_node=1 pretrain_gpt.py \ + ... \ + --tokenizer-type NullTokenizer \ + --vocab-size 131072 + +# HuggingFace tokenizer with specified metadata +torchrun --nproc_per_node=1 pretrain_gpt.py \ + ... \ + --tokenizer-type HuggingFaceTokenizer \ + --tokenizer-model meta-llama/Meta-Llama-3-8B \ + --tokenizer-metadata /path/to/metadata.json + +``` + +The Megatron-LM pretraining script still supports the legacy tokenizer system. To enable it, simply add the `--legacy-tokenizer` flag. diff --git a/docs/source/api-guide/transformer.rst b/docs/source/api-guide/transformer.rst new file mode 100644 index 0000000000..6e2e894d54 --- /dev/null +++ b/docs/source/api-guide/transformer.rst @@ -0,0 +1,136 @@ +transformer package +=================== + +The `transformer` package provides a customizable and configurable +implementation of the transformer model architecture. Each component +of a transformer stack, from entire layers down to individual linear +layers, can be customized by swapping in different PyTorch modules +using the "spec" parameters (see `here +`_). The +configuration of the transformer (hidden size, number of layers, +number of attention heads, etc.) is provided via a `TransformerConfig` +object. + +Submodules +---------- + +transformer.attention module +---------------------------- + +This is the entire attention portion, either self or cross attention, +of a transformer layer including the query, key, and value +projections, a "core" attention calculation (e.g. dot product +attention), and final output linear projection. + +.. automodule:: core.transformer.attention + :members: + :undoc-members: + :show-inheritance: + +transformer.dot\_product\_attention module +------------------------------------------ + +This is a PyTorch-only implementation of dot product attention. A more +efficient implementation, like those provided by FlashAttention or +CUDNN's FusedAttention, are typically used when training speed is +important. + +.. 
automodule:: core.transformer.dot_product_attention + :members: + :undoc-members: + :show-inheritance: + +transformer.enums module +------------------------ + +.. automodule:: core.transformer.enums + :members: + :undoc-members: + :show-inheritance: + +transformer.identity\_op module +------------------------------- + +This provides a pass-through module that can be used in specs to +indicate that the operation should not be performed. For example, when +using LayerNorm with the subsequent linear layer, an IdentityOp can be +passed in as the LayerNorm module to use. + +.. automodule:: core.transformer.identity_op + :members: + :undoc-members: + :show-inheritance: + +transformer.mlp module +---------------------- + +This is the entire MLP portion of the transformer layer with an input +projection, non-linearity, and output projection. + +.. automodule:: core.transformer.mlp + :members: + :undoc-members: + :show-inheritance: + +transformer.module module +------------------------- + +This provides a common base class for all modules used in the +transformer that contains some common functionality. + +.. automodule:: core.transformer.module + :members: + :undoc-members: + :show-inheritance: + +transformer.transformer\_block module +------------------------------------- + +A block, or stack, of several transformer layers. The layers can all +be the same or each can be unique. + +.. automodule:: core.transformer.transformer_block + :members: + :undoc-members: + :show-inheritance: + +transformer.transformer\_config module +-------------------------------------- + +This contains all of the configuration options for the +transformer. Using a dataclass reduces code bloat by keeping all +arguments together in a dataclass instead of passing several arguments +through multiple layers of function calls. + +.. automodule:: core.transformer.transformer_config + :members: + :undoc-members: + :show-inheritance: + +transformer.transformer\_layer module +------------------------------------- + +A single standard transformer layer including attention and MLP blocks. + +.. automodule:: core.transformer.transformer_layer + :members: + :undoc-members: + :show-inheritance: + +transformer.utils module +------------------------ + +Various utilities used in the transformer implementation. + +.. automodule:: core.transformer.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: core.transformer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/images/context_parallel/CP_overview.png b/docs/source/images/context_parallel/CP_overview.png new file mode 100644 index 0000000000..38c55b371a Binary files /dev/null and b/docs/source/images/context_parallel/CP_overview.png differ diff --git a/docs/source/images/context_parallel/CP_results.png b/docs/source/images/context_parallel/CP_results.png new file mode 100644 index 0000000000..e0415ce86e Binary files /dev/null and b/docs/source/images/context_parallel/CP_results.png differ diff --git a/docs/source/images/custom_fsdp/FSDP_Allreduce.png b/docs/source/images/custom_fsdp/FSDP_Allreduce.png new file mode 100644 index 0000000000..66e2391ed0 Binary files /dev/null and b/docs/source/images/custom_fsdp/FSDP_Allreduce.png differ diff --git a/docs/source/images/custom_fsdp/FSDP_workflow.png b/docs/source/images/custom_fsdp/FSDP_workflow.png new file mode 100644 index 0000000000..588b6f220a Binary files /dev/null and b/docs/source/images/custom_fsdp/FSDP_workflow.png differ diff --git a/docs/source/images/custom_fsdp/MCore_Custom_FSDP_Class_Diagram.png b/docs/source/images/custom_fsdp/MCore_Custom_FSDP_Class_Diagram.png new file mode 100644 index 0000000000..f9603079b9 Binary files /dev/null and b/docs/source/images/custom_fsdp/MCore_Custom_FSDP_Class_Diagram.png differ diff --git a/docs/source/images/distrib_optimizer/data_flow.png b/docs/source/images/distrib_optimizer/data_flow.png new file mode 100644 index 0000000000..01f5cfb2e7 Binary files /dev/null and b/docs/source/images/distrib_optimizer/data_flow.png differ diff --git a/docs/source/images/distrib_optimizer/sharding_scheme.png b/docs/source/images/distrib_optimizer/sharding_scheme.png new file mode 100644 index 0000000000..e48dd95024 Binary files /dev/null and b/docs/source/images/distrib_optimizer/sharding_scheme.png differ diff --git a/docs/source/images/moe/token_drop.png b/docs/source/images/moe/token_drop.png new file mode 100644 index 0000000000..1c335ee7aa Binary files /dev/null and b/docs/source/images/moe/token_drop.png differ diff --git a/docs/source/images/multi_token_prediction/MTP_implementation.png b/docs/source/images/multi_token_prediction/MTP_implementation.png new file mode 100644 index 0000000000..1f246c3e39 Binary files /dev/null and b/docs/source/images/multi_token_prediction/MTP_implementation.png differ diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000000..f2a89b8ac7 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,23 @@ +.. Lumache documentation master file, created by + sphinx-quickstart on Tue Aug 15 13:44:10 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Megatron Core User Guide +=================================== + +**Megatron Core** is a Python library that has the core components required to build your language models. +A reference implementation of Megatron Core can be found in `NeMo `_ It offers a *simple* and +*intuitive* API. + +.. toctree:: + :maxdepth: 2 + :caption: User Guide + + user-guide/index + +.. toctree:: + :maxdepth: 3 + :caption: API Guide + + api-guide/index diff --git a/docs/source/user-guide/index.rst b/docs/source/user-guide/index.rst new file mode 100644 index 0000000000..a1edf4271c --- /dev/null +++ b/docs/source/user-guide/index.rst @@ -0,0 +1,6 @@ +User Guide +============ + +.. mdinclude:: ../../../megatron/core/QuickStart.md +.. 
mdinclude:: ../../../megatron/core/Installation_Guide.md +.. mdinclude:: ../../../megatron/core/MSC_Integration.md \ No newline at end of file diff --git a/examples/__init__.py b/examples/__init__.py new file mode 100644 index 0000000000..0519ecba6e --- /dev/null +++ b/examples/__init__.py @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/examples/detxoify_lm/README.md b/examples/academic_paper_scripts/detxoify_lm/README.md similarity index 100% rename from examples/detxoify_lm/README.md rename to examples/academic_paper_scripts/detxoify_lm/README.md diff --git a/examples/detxoify_lm/annotations/filter-selfgeneration.py b/examples/academic_paper_scripts/detxoify_lm/annotations/filter-selfgeneration.py similarity index 100% rename from examples/detxoify_lm/annotations/filter-selfgeneration.py rename to examples/academic_paper_scripts/detxoify_lm/annotations/filter-selfgeneration.py diff --git a/examples/detxoify_lm/annotations/perspective_api_annotate.py b/examples/academic_paper_scripts/detxoify_lm/annotations/perspective_api_annotate.py similarity index 98% rename from examples/detxoify_lm/annotations/perspective_api_annotate.py rename to examples/academic_paper_scripts/detxoify_lm/annotations/perspective_api_annotate.py index fd82c2a2ae..9736db099a 100644 --- a/examples/detxoify_lm/annotations/perspective_api_annotate.py +++ b/examples/academic_paper_scripts/detxoify_lm/annotations/perspective_api_annotate.py @@ -107,7 +107,7 @@ def get_score(line): except UnicodeDecodeError: try: decoded_text = encoded_text[:20476].decode('utf8') - except: + except Exception: print("Error occurred") data['score'] = None return json.dumps(data) @@ -138,7 +138,7 @@ def get_scores(lines): except UnicodeDecodeError: try: decoded_text = encoded_text[:20476].decode('utf8') - except: + except Exception: print("Error occurred") data['score'] = None all_data.append(json.dumps(data)) diff --git a/examples/detxoify_lm/annotations/preprocess.sh b/examples/academic_paper_scripts/detxoify_lm/annotations/preprocess.sh similarity index 100% rename from examples/detxoify_lm/annotations/preprocess.sh rename to examples/academic_paper_scripts/detxoify_lm/annotations/preprocess.sh diff --git a/examples/academic_paper_scripts/detxoify_lm/finetune_gpt.py b/examples/academic_paper_scripts/detxoify_lm/finetune_gpt.py new file mode 100644 index 0000000000..c3a9f69cae --- /dev/null +++ b/examples/academic_paper_scripts/detxoify_lm/finetune_gpt.py @@ -0,0 +1,159 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+ + +"""Fine-tune GPT""" + +import torch +from functools import partial +import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir, os.path.pardir))) +from megatron.training import get_args +from megatron.training import get_timers +from megatron.training import get_tokenizer +from megatron.training import print_rank_0 +from megatron.core import mpu +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.blended_megatron_dataset_config import GPTDatasetConfig +from megatron.core.datasets.gpt_dataset import GPTDataset +from megatron.core.datasets.utils import get_blend_from_list +from megatron.legacy.model import GPTModel +from megatron.core.enums import ModelType +from megatron.training import pretrain +from megatron.training.utils import get_ltor_masks_and_position_ids +from megatron.training.utils import average_losses_across_data_parallel_group + +def model_provider(pre_process=True, post_process=True): + """Build the model.""" + + print_rank_0('building GPT model ...') + model = GPTModel( + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process + ) + return model + + +def get_batch(data_iterator): + """Generate a batch""" + args = get_args() + tokenizer = get_tokenizer() + + # Items and their type. + keys = ['text'] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. + tokens_ = data_b['text'].long() + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + # Get the masks and postition ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, + tokenizer.eod, + args.reset_position_ids, + args.reset_attention_mask, + args.eod_mask_loss) + + return tokens, labels, loss_mask, attention_mask, position_ids + +def loss_func(loss_mask, output_tensor): + losses = output_tensor.float() + loss_mask = loss_mask.view(-1).float() + loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() + + # Reduce loss for logging. + averaged_loss = average_losses_across_data_parallel_group([loss]) + + return loss, {'lm loss': averaged_loss[0]} + + +def forward_step(data_iterator, model): + """Forward step.""" + args = get_args() + timers = get_timers() + + # Get the batch. 
+ timers('batch-generator').start() + tokens, labels, loss_mask, attention_mask, position_ids = get_batch( + data_iterator) + timers('batch-generator').stop() + + output_tensor = model(tokens, position_ids, attention_mask, + labels=labels) + + return output_tensor, partial(loss_func, loss_mask) + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + + print_rank_0('> building train, validation, and test datasets ' + 'for GPT ...') + train_ds, _, test_ds = BlendedMegatronDatasetBuilder( + GPTDataset, + train_val_test_num_samples, + lambda: True, + GPTDatasetConfig( + blend=get_blend_from_list(args.data_path), + split=args.split, + random_seed=args.seed, + sequence_length=args.seq_length, + path_to_cache=args.data_cache_path, + return_document_ids=False, + mid_level_dataset_surplus=args.mid_level_dataset_surplus, + ) + ).build() + print_rank_0("> finished creating finetuning GPT datasets ...") + + _, valid_ds, _ = BlendedMegatronDatasetBuilder( + GPTDataset, + train_val_test_num_samples, + lambda: True, + GPTDatasetConfig( + blend=get_blend_from_list(args.data_path2), + split="98,2,0", + random_seed=1234, + sequence_length=2048, + path_to_cache=args.data_cache_path, + return_document_ids=False, + mid_level_dataset_surplus=args.mid_level_dataset_surplus, + ) + ).build() + print_rank_0("> finished creating pretrained GPT datasets ...") + + return train_ds, valid_ds, test_ds + + +def add_validation_args(parser): + """Text generation arguments.""" + group = parser.add_argument_group(title='validation set') + group.add_argument('--data-path2', nargs='*', default=None, + help='Path to the validation dataset. Accepted format:' + '1) a single data path, 2) multiple datasets in the' + 'form: dataset1-weight dataset1-path dataset2-weight ' + 'dataset2-path ...') + group.add_argument('--eval-ppl', action='store_true', default=False) + group.add_argument('--stored_params', type=dict, default=dict()) + return parser + + +if __name__ == "__main__": + + pretrain(train_valid_test_datasets_provider, model_provider, + ModelType.encoder_or_decoder, + forward_step, args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}, + extra_args_provider=add_validation_args,) diff --git a/examples/detxoify_lm/finetune_gpt_distributed-1.3b.sh b/examples/academic_paper_scripts/detxoify_lm/finetune_gpt_distributed-1.3b.sh similarity index 98% rename from examples/detxoify_lm/finetune_gpt_distributed-1.3b.sh rename to examples/academic_paper_scripts/detxoify_lm/finetune_gpt_distributed-1.3b.sh index 62a36c0b79..a212fbdf3f 100755 --- a/examples/detxoify_lm/finetune_gpt_distributed-1.3b.sh +++ b/examples/academic_paper_scripts/detxoify_lm/finetune_gpt_distributed-1.3b.sh @@ -43,7 +43,6 @@ python -m torch.distributed.run $DISTRIBUTED_ARGS \ --data-path2 ${DATA_BLEND} \ --vocab-file $VOCAB_FILE \ --merge-file $MERGE_FILE \ - --data-impl mmap \ --split 100,0,0 \ --distributed-backend nccl \ --lr-decay-style constant \ diff --git a/examples/detxoify_lm/generate-1.3b.sh b/examples/academic_paper_scripts/detxoify_lm/generate-1.3b.sh similarity index 100% rename from examples/detxoify_lm/generate-1.3b.sh rename to examples/academic_paper_scripts/detxoify_lm/generate-1.3b.sh diff --git a/examples/academic_paper_scripts/detxoify_lm/generate_samples_gpt.py b/examples/academic_paper_scripts/detxoify_lm/generate_samples_gpt.py new file mode 100644 index 0000000000..895a45d024 --- /dev/null +++ 
b/examples/academic_paper_scripts/detxoify_lm/generate_samples_gpt.py @@ -0,0 +1,260 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + +"""Sample Generate GPT""" +import json +import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir, os.path.pardir))) +import torch +from megatron.training import get_args +from megatron.training import get_tokenizer +from megatron.training import print_rank_0 +from megatron.training.checkpointing import load_checkpoint +from megatron.core import mpu +from megatron.training.initialize import initialize_megatron +from megatron.legacy.model import GPTModel +from megatron.training import get_model +from megatron.inference.text_generation import generate_and_post_process +from megatron.training.arguments import core_transformer_config_from_args +from megatron.core.models.gpt import GPTModel +from typing import Union +import megatron.legacy.model +from megatron.core.transformer.spec_utils import import_module +from megatron.training.arguments import core_transformer_config_from_args +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec + +def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.legacy.model.GPTModel]: + """Builds the model. + + If you set the use_legacy_models to True, it will return the legacy GPT model and if not the core GPT model. + + Args: + pre_process (bool, optional): Set to true if you need to compute embedings. Defaults to True. + post_process (bool, optional): Set to true if you need to want to compute output logits/loss. Defaults to True. + + + Returns: + Union[GPTModel, megatron.legacy.model.GPTModel]: The returned model + """ + args = get_args() + + print_rank_0('building GPT model ...') + config = core_transformer_config_from_args(args) + + if args.use_legacy_models: + model = megatron.legacy.model.GPTModel( + config, + num_tokentypes=0, + parallel_output=False, + pre_process=pre_process, + post_process=post_process + ) + else: + if args.spec is None: + if args.transformer_impl == 'local': + transformer_layer_spec = get_gpt_layer_local_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) + elif args.transformer_impl == 'transformer_engine': + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) + else: + raise ValueError(f"Invalid transformer_impl {args.transformer_impl}") + elif args.spec[0] == 'local': + transformer_layer_spec = get_gpt_layer_local_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) + else: + transformer_layer_spec = import_module(args.spec) + + model = GPTModel( + config=config, + transformer_layer_spec=transformer_layer_spec, + vocab_size=args.padded_vocab_size, + max_sequence_length=args.max_position_embeddings, + pre_process=pre_process, + post_process=post_process, + fp16_lm_cross_entropy=args.fp16_lm_cross_entropy, + parallel_output=False, + share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights, + position_embedding_type=args.position_embedding_type, + rotary_percent=args.rotary_percent + ) + + return model + +def add_text_generate_args(parser): + """Text generation arguments.""" + group = parser.add_argument_group(title='text generation') + + group.add_argument("--temperature", type=float, default=1.0, + help='Sampling temperature.') + 
group.add_argument("--greedy", action='store_true', default=False, + help='Use greedy sampling.') + group.add_argument("--top_p", type=float, default=0.0, + help='Top p sampling.') + group.add_argument("--top_k", type=int, default=0, + help='Top k sampling.') + group.add_argument("--out-seq-length", type=int, default=1024, + help='Size of the output generated text.') + group.add_argument("--sample-input-file", type=str, default=None, + help='Get input from file instead of interactive mode, ' + 'each line is an input.') + group.add_argument("--sample-output-file", type=str, default=None, + help='Output file got from --sample-input-file') + group.add_argument("--num-samples", type=int, default=0, + help='Number of samples to generate unconditionally, ' + 'defaults to 0 and interactive conditional sampling') + group.add_argument("--genfile", type=str, + help='Output file when generating unconditionally') + return parser + +def generate_samples_unconditional(model): + args = get_args() + + if torch.distributed.get_rank() == 0: + cnt = 0 + num_samples = args.num_samples + from tqdm import tqdm + pbar = tqdm(total=num_samples) + + while True: + if torch.distributed.get_rank() == 0: + sentences = [''] * args.global_batch_size + print("global batch size", args.global_batch_size) + max_len = args.out_seq_length + resp_sentences, resp_sentences_seg, output_logits, \ + tokens = generate_and_post_process(model, prompts=sentences, + tokens_to_generate=max_len, + return_output_log_probs=False, + top_k_sampling=args.top_k, + top_p_sampling=args.top_p, + add_BOS=True, + temperature=1.0) + for prompt, generation, token in zip(sentences, resp_sentences, tokens): + datum = {'text': generation[len(prompt):], 'all_text': generation, 'prompt': prompt, 'id': cnt} + yield datum + cnt += 1 + pbar.update() + if cnt >= num_samples: + break + + if cnt >= num_samples: + pbar.close() + break + else: + generate_and_post_process(model) + + +def generate_samples_conditional(model): + args = get_args() + + if torch.distributed.get_rank() == 0: + num_samples = args.num_samples + cnt = 0 + from tqdm import tqdm + pbar = tqdm(total=num_samples) + + fname = open(args.sample_input_file, "r") + lines = fname.readlines() + all_raw_text = [json.loads(line)['prompt']['text'] for line in lines] + input_count = len(all_raw_text) + input_pos = 0 + + while True: + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + sentences = [] + print("global batch size", args.global_batch_size) + for _ in range(args.global_batch_size): + if input_pos >= input_count: + print(f"input pos: {input_pos}, input count: {input_count}") + raw_text = "EMPTY TEXT" + else: + raw_text = all_raw_text[input_pos] + input_pos += 1 + sentences.append(raw_text) + + max_len = args.out_seq_length + resp_sentences, resp_sentences_seg, output_logits, \ + tokens = generate_and_post_process(model, prompts=sentences, + tokens_to_generate=max_len, + return_output_log_probs=False, + top_k_sampling=args.top_k, + top_p_sampling=args.top_p, + add_BOS=False, + temperature=1.0) + for prompt, generation, token in zip(sentences, resp_sentences, tokens): + datum = {'text': generation[len(prompt):], 'all_text': generation, 'prompt': prompt, 'id': cnt} + yield datum + cnt += 1 + pbar.update() + if cnt >= num_samples: + break + + if cnt >= num_samples: + pbar.close() + break + else: + generate_and_post_process(model) + + +def generate_and_write_samples_unconditional(model): + args = get_args() + assert args.genfile is not None + with open(args.genfile, 'w') as f: + for 
datum in generate_samples_unconditional(model): + if torch.distributed.get_rank() == 0: + f.write(json.dumps(datum) + '\n') + + +def generate_and_write_samples_conditional(model): + args = get_args() + if args.sample_output_file is None: + sample_output_file = args.sample_input_file + ".out" + print('`sample-output-file` not specified, setting ' + 'it to {}'.format(sample_output_file)) + else: + sample_output_file = args.sample_output_file + with open(sample_output_file, 'w') as f: + for datum in generate_samples_conditional(model): + if torch.distributed.get_rank() == 0: + f.write(json.dumps(datum) + '\n') + + +def main(): + """Main program.""" + + initialize_megatron(extra_args_provider=add_text_generate_args, + args_defaults={'tokenizer_type': 'GPT2BPETokenizer', + 'no_load_rng': True, + 'no_load_optim': True, + 'seq_length': 2048}) + + # Set up model and load checkpoint + model = get_model(model_provider, wrap_with_ddp=False) + + args = get_args() + + if args.load is not None: + _ = load_checkpoint(model, None, None) + model = model[0] + + # Generate samples. + if args.sample_input_file != None: + print(f"{args.sample_input_file}") + generate_and_write_samples_conditional(model) + else: + generate_and_write_samples_unconditional(model) + + +if __name__ == "__main__": + + main() diff --git a/examples/detxoify_lm/perspective_api.py b/examples/academic_paper_scripts/detxoify_lm/perspective_api.py similarity index 100% rename from examples/detxoify_lm/perspective_api.py rename to examples/academic_paper_scripts/detxoify_lm/perspective_api.py diff --git a/examples/detxoify_lm/self_generation/selfgenerate-1.3b-unconditional.sh b/examples/academic_paper_scripts/detxoify_lm/self_generation/selfgenerate-1.3b-unconditional.sh similarity index 100% rename from examples/detxoify_lm/self_generation/selfgenerate-1.3b-unconditional.sh rename to examples/academic_paper_scripts/detxoify_lm/self_generation/selfgenerate-1.3b-unconditional.sh diff --git a/examples/msdp/README.md b/examples/academic_paper_scripts/msdp/README.md similarity index 100% rename from examples/msdp/README.md rename to examples/academic_paper_scripts/msdp/README.md diff --git a/examples/msdp/data_processing.sh b/examples/academic_paper_scripts/msdp/data_processing.sh similarity index 100% rename from examples/msdp/data_processing.sh rename to examples/academic_paper_scripts/msdp/data_processing.sh diff --git a/examples/msdp/eval_knwl_generation.sh b/examples/academic_paper_scripts/msdp/eval_knwl_generation.sh similarity index 100% rename from examples/msdp/eval_knwl_generation.sh rename to examples/academic_paper_scripts/msdp/eval_knwl_generation.sh diff --git a/examples/msdp/eval_resp_generation.sh b/examples/academic_paper_scripts/msdp/eval_resp_generation.sh similarity index 100% rename from examples/msdp/eval_resp_generation.sh rename to examples/academic_paper_scripts/msdp/eval_resp_generation.sh diff --git a/examples/msdp/prep_resp_gen.sh b/examples/academic_paper_scripts/msdp/prep_resp_gen.sh similarity index 100% rename from examples/msdp/prep_resp_gen.sh rename to examples/academic_paper_scripts/msdp/prep_resp_gen.sh diff --git a/examples/msdp/prompt_knwl_gen.sh b/examples/academic_paper_scripts/msdp/prompt_knwl_gen.sh similarity index 100% rename from examples/msdp/prompt_knwl_gen.sh rename to examples/academic_paper_scripts/msdp/prompt_knwl_gen.sh diff --git a/examples/msdp/prompt_resp_gen.sh b/examples/academic_paper_scripts/msdp/prompt_resp_gen.sh similarity index 100% rename from examples/msdp/prompt_resp_gen.sh 
rename to examples/academic_paper_scripts/msdp/prompt_resp_gen.sh diff --git a/examples/sc21/CONFIG.sh b/examples/academic_paper_scripts/sc21/CONFIG.sh similarity index 100% rename from examples/sc21/CONFIG.sh rename to examples/academic_paper_scripts/sc21/CONFIG.sh diff --git a/examples/academic_paper_scripts/sc21/README.md b/examples/academic_paper_scripts/sc21/README.md new file mode 100644 index 0000000000..ec922d153d --- /dev/null +++ b/examples/academic_paper_scripts/sc21/README.md @@ -0,0 +1,50 @@ +# Reproducing Figures in SC21 Paper + + +This directory contains some of the scripts that were used to produce the +results in the [Megatron paper](https://arxiv.org/pdf/2104.04473.pdf) that is +to appear at [SuperComputing 2021](https://sc21.supercomputing.org/). These +scripts use [Slurm](https://slurm.schedmd.com/documentation.html) with the +[pyxis plugin](https://github.com/NVIDIA/pyxis), but can be modified for other +schedulers as well. + + +## Git commit + +To replicate these results use Megatron-LM commit: 6985e58938d40ad91ac07b0fddcfad8132e1447e + + +## Setup + +All the cluster-dependent variables are in [`CONFIG.sh`](./CONFIG.sh). Please +update the unspecified values (in angle brackets `<...>`) before launching any +scripts. + + + +## Scripts + +Below is a list of scripts that can be used to reproduce various figures in our +[paper](https://arxiv.org/pdf/2104.04473.pdf): + +* [run_table_1.sh](./run_table_1.sh): Table 1 showing weak-scaling throughput +for GPT models ranging from 1 billion to 1 trillion parameters. +* [run_figure_11.sh](./run_figure_11.sh): Figure 11 showing the weak-scaling +performance of pipeline parallelism. +* [run_figure_12.sh](./run_figure_12.sh): Figure 12 showing the effect of +the interleaved schedule on a 175B GPT model. +* [run_figure_13.sh](./run_figure_13.sh): Figure 13 showing the effect of +different degrees of pipeline and tensor model parallelism on a model with +162.2 billion parameters. +* [run_figure_14.sh](./run_figure_14.sh): Figure 14 showing the effect of +different degrees of data and pipeline model parallelism on a model with +5.9 billion parameters. +* [run_figure_15.sh](./run_figure_15.sh): Figure 15 showing the effect of +different degrees of data and tensor model parallelism on a model with +5.9 billion parameters. +* [run_figure_16.sh](./run_figure_16.sh): Figure 16 showing the effect of +microbatch size. +* [run_figure_17.sh](./run_figure_17.sh): Figure 17 showing the effect of +activation recomputation. +* [run_figure_18.sh](./run_figure_18.sh): Figure 18 showing the effect of +the scatter-gather communication optimization. 
diff --git a/examples/sc21/SBATCH.sh b/examples/academic_paper_scripts/sc21/SBATCH.sh similarity index 100% rename from examples/sc21/SBATCH.sh rename to examples/academic_paper_scripts/sc21/SBATCH.sh diff --git a/examples/sc21/SRUN.sh b/examples/academic_paper_scripts/sc21/SRUN.sh similarity index 100% rename from examples/sc21/SRUN.sh rename to examples/academic_paper_scripts/sc21/SRUN.sh diff --git a/examples/sc21/run_figure_11.sh b/examples/academic_paper_scripts/sc21/run_figure_11.sh similarity index 100% rename from examples/sc21/run_figure_11.sh rename to examples/academic_paper_scripts/sc21/run_figure_11.sh diff --git a/examples/sc21/run_figure_12.sh b/examples/academic_paper_scripts/sc21/run_figure_12.sh similarity index 100% rename from examples/sc21/run_figure_12.sh rename to examples/academic_paper_scripts/sc21/run_figure_12.sh diff --git a/examples/sc21/run_figure_13.sh b/examples/academic_paper_scripts/sc21/run_figure_13.sh similarity index 100% rename from examples/sc21/run_figure_13.sh rename to examples/academic_paper_scripts/sc21/run_figure_13.sh diff --git a/examples/sc21/run_figure_14.sh b/examples/academic_paper_scripts/sc21/run_figure_14.sh similarity index 100% rename from examples/sc21/run_figure_14.sh rename to examples/academic_paper_scripts/sc21/run_figure_14.sh diff --git a/examples/sc21/run_figure_15.sh b/examples/academic_paper_scripts/sc21/run_figure_15.sh similarity index 100% rename from examples/sc21/run_figure_15.sh rename to examples/academic_paper_scripts/sc21/run_figure_15.sh diff --git a/examples/sc21/run_figure_16.sh b/examples/academic_paper_scripts/sc21/run_figure_16.sh similarity index 100% rename from examples/sc21/run_figure_16.sh rename to examples/academic_paper_scripts/sc21/run_figure_16.sh diff --git a/examples/sc21/run_figure_17.sh b/examples/academic_paper_scripts/sc21/run_figure_17.sh similarity index 100% rename from examples/sc21/run_figure_17.sh rename to examples/academic_paper_scripts/sc21/run_figure_17.sh diff --git a/examples/sc21/run_figure_18.sh b/examples/academic_paper_scripts/sc21/run_figure_18.sh similarity index 100% rename from examples/sc21/run_figure_18.sh rename to examples/academic_paper_scripts/sc21/run_figure_18.sh diff --git a/examples/sc21/run_table_1.sh b/examples/academic_paper_scripts/sc21/run_table_1.sh similarity index 100% rename from examples/sc21/run_table_1.sh rename to examples/academic_paper_scripts/sc21/run_table_1.sh diff --git a/examples/bert/README.md b/examples/bert/README.md new file mode 100644 index 0000000000..6c1fe95bf0 --- /dev/null +++ b/examples/bert/README.md @@ -0,0 +1,53 @@ +# BERT MODEL + +## Table of contents +- [1. Training Setup](#1-training-setup) +- [2. Configurations](#2-configurations) + +## 1. Training setup + + +To run the model using a Docker container, run it as follows: +``` +PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:24.01-py3 +CHECKPOINT_PATH="" # +TENSORBOARD_LOGS_PATH=""# +VOCAB_FILE="" #//bert-vocab.txt +DATA_PATH="" #_text_document + +docker run \ + --gpus=all \ + --ipc=host \ + --workdir /workspace/megatron-lm \ + -v /path/to/data:/path/to/data \ + -v /path/to/megatron-lm:/workspace/megatron-lm \ + megatron-lm nvcr.io/nvidia/pytorch:24.01-py3 \ + bash examples/bert/train_bert_340m_distributed.sh $CHECKPOINT_PATH $TENSORBOARD_LOGS_PATH $VOCAB_FILE $DATA_PATH + +``` +NOTE: Depending on the environment you are running in, the above command might look slightly different. + + +## 2. Configurations + +The example in this folder shows you how to run the 340M model. 
There are other configs you could run as well + +### 4B +``` + --num-layers 48 \ + --hidden-size 2560 \ + --num-attention-heads 32 \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + +``` + +### 20B +``` + --num-layers 48 \ + --hidden-size 6144 \ + --num-attention-heads 96 \ + --tensor-model-parallel-size 4 \ + --pipeline-model-parallel-size 4 \ + +``` \ No newline at end of file diff --git a/examples/bert/train_bert_340m_distributed.sh b/examples/bert/train_bert_340m_distributed.sh new file mode 100644 index 0000000000..f0d9c87c8b --- /dev/null +++ b/examples/bert/train_bert_340m_distributed.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +# Runs the "340M" parameter model (Bert - Large) + +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NUM_NODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) + +CHECKPOINT_PATH=$1 # +TENSORBOARD_LOGS_PATH=$2 # +VOCAB_FILE=$3 #/bert-vocab.json +DATA_PATH=$4 #_text_document + +DISTRIBUTED_ARGS=( + --nproc_per_node $GPUS_PER_NODE + --nnodes $NUM_NODES + --master_addr $MASTER_ADDR + --master_port $MASTER_PORT +) + +BERT_MODEL_ARGS=( + --num-layers 24 + --hidden-size 1024 + --num-attention-heads 16 + --seq-length 512 + --max-position-embeddings 512 + --attention-backend auto # Can use (flash/fused/unfused/local) +) + +TRAINING_ARGS=( + --micro-batch-size 4 + --global-batch-size 32 + --train-iters 1000000 + --weight-decay 1e-2 + --clip-grad 1.0 + --fp16 + --lr 0.0001 + --lr-decay-iters 990000 + --lr-decay-style linear + --min-lr 1.0e-5 + --weight-decay 1e-2 + --lr-warmup-fraction .01 + --clip-grad 1.0 +) + +MODEL_PARALLEL_ARGS=( + --tensor-model-parallel-size 8 + --pipeline-model-parallel-size 16 +) + +DATA_ARGS=( + --data-path $DATA_PATH + --vocab-file $VOCAB_FILE + --split 949,50,1 +) + +EVAL_AND_LOGGING_ARGS=( + --log-interval 100 + --save-interval 10000 + --eval-interval 1000 + --save $CHECKPOINT_PATH + --load $CHECKPOINT_PATH + --eval-iters 10 + --tensorboard-dir $TENSORBOARD_LOGS_PATH +) + +torchrun ${DISTRIBUTED_ARGS[@]} pretrain_bert.py \ + ${BERT_MODEL_ARGS[@]} \ + ${TRAINING_ARGS[@]} \ + ${MODEL_PARALLEL_ARGS[@]} \ + ${DATA_ARGS[@]} \ + ${EVAL_AND_LOGGING_ARGS[@]} + \ No newline at end of file diff --git a/examples/detxoify_lm/finetune_gpt.py b/examples/detxoify_lm/finetune_gpt.py deleted file mode 100644 index 70b781e0ee..0000000000 --- a/examples/detxoify_lm/finetune_gpt.py +++ /dev/null @@ -1,145 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
- - -"""Fine-tune GPT""" - -import torch -from functools import partial -import os -import sys -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), - os.path.pardir, os.path.pardir))) -from megatron import get_args -from megatron import get_timers -from megatron import get_tokenizer -from megatron import print_rank_0 -from megatron.core import mpu -from megatron.data.blendable_dataset import BlendableDataset -from megatron.data.gpt_dataset import build_train_valid_test_datasets -from megatron.model import GPTModel -from megatron.core.enums import ModelType -from megatron.training import pretrain -from megatron.utils import get_ltor_masks_and_position_ids -from megatron.utils import average_losses_across_data_parallel_group - -def model_provider(pre_process=True, post_process=True): - """Build the model.""" - - print_rank_0('building GPT model ...') - model = GPTModel( - num_tokentypes=0, - parallel_output=True, - pre_process=pre_process, - post_process=post_process - ) - return model - - -def get_batch(data_iterator): - """Generate a batch""" - args = get_args() - tokenizer = get_tokenizer() - - # Items and their type. - keys = ['text'] - datatype = torch.int64 - - # Broadcast data. - if data_iterator is not None: - data = next(data_iterator) - else: - data = None - data_b = mpu.broadcast_data(keys, data, datatype) - - # Unpack. - tokens_ = data_b['text'].long() - labels = tokens_[:, 1:].contiguous() - tokens = tokens_[:, :-1].contiguous() - - # Get the masks and postition ids. - attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( - tokens, - tokenizer.eod, - args.reset_position_ids, - args.reset_attention_mask, - args.eod_mask_loss) - - return tokens, labels, loss_mask, attention_mask, position_ids - -def loss_func(loss_mask, output_tensor): - losses = output_tensor.float() - loss_mask = loss_mask.view(-1).float() - loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() - - # Reduce loss for logging. - averaged_loss = average_losses_across_data_parallel_group([loss]) - - return loss, {'lm loss': averaged_loss[0]} - - -def forward_step(data_iterator, model): - """Forward step.""" - args = get_args() - timers = get_timers() - - # Get the batch. 
- timers('batch-generator').start() - tokens, labels, loss_mask, attention_mask, position_ids = get_batch( - data_iterator) - timers('batch-generator').stop() - - output_tensor = model(tokens, position_ids, attention_mask, - labels=labels) - - return output_tensor, partial(loss_func, loss_mask) - - -def train_valid_test_datasets_provider(train_val_test_num_samples): - """Build train, valid, and test datasets.""" - args = get_args() - - print_rank_0('> building train, validation, and test datasets ' - 'for GPT ...') - train_ds, valid_ds1, test_ds = build_train_valid_test_datasets( - data_prefix=args.data_path, - data_impl=args.data_impl, - splits_string=args.split, - train_valid_test_num_samples=train_val_test_num_samples, - seq_length=args.seq_length, - seed=args.seed, - skip_warmup=(not args.mmap_warmup)) - print_rank_0("> finished creating finetuning GPT datasets ...") - - _, valid_ds, _ = build_train_valid_test_datasets( - data_prefix=args.data_path2, - data_impl="mmap", - splits_string="98,2,0", - train_valid_test_num_samples=train_val_test_num_samples, - seq_length=2048, - seed=1234, - skip_warmup=(not args.mmap_warmup)) - print_rank_0("> finished creating pretrained GPT datasets ...") - - return train_ds, valid_ds, test_ds - - -def add_validation_args(parser): - """Text generation arguments.""" - group = parser.add_argument_group(title='validation set') - group.add_argument('--data-path2', nargs='*', default=None, - help='Path to the validation dataset. Accepted format:' - '1) a single data path, 2) multiple datasets in the' - 'form: dataset1-weight dataset1-path dataset2-weight ' - 'dataset2-path ...') - group.add_argument('--eval-ppl', action='store_true', default=False) - group.add_argument('--stored_params', type=dict, default=dict()) - return parser - - -if __name__ == "__main__": - - pretrain(train_valid_test_datasets_provider, model_provider, - ModelType.encoder_or_decoder, - forward_step, args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}, - extra_args_provider=add_validation_args,) diff --git a/examples/detxoify_lm/generate_samples_gpt.py b/examples/detxoify_lm/generate_samples_gpt.py deleted file mode 100644 index 47e1590ea5..0000000000 --- a/examples/detxoify_lm/generate_samples_gpt.py +++ /dev/null @@ -1,199 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
- - -"""Sample Generate GPT""" -import json -import os -import sys -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), - os.path.pardir, os.path.pardir))) -import torch -from megatron import get_args -from megatron import get_tokenizer -from megatron import print_rank_0 -from megatron.checkpointing import load_checkpoint -from megatron.core import mpu -from megatron.initialize import initialize_megatron -from megatron.model import GPTModel -from megatron.training import get_model -from megatron.text_generation import generate_and_post_process - - -def model_provider(pre_process=True, post_process=True): - """Build the model.""" - - print_rank_0('building GPT model ...') - model = GPTModel(num_tokentypes=0, parallel_output=False, - pre_process=pre_process, post_process=post_process) - - return model - -def add_text_generate_args(parser): - """Text generation arguments.""" - group = parser.add_argument_group(title='text generation') - - group.add_argument("--temperature", type=float, default=1.0, - help='Sampling temperature.') - group.add_argument("--greedy", action='store_true', default=False, - help='Use greedy sampling.') - group.add_argument("--top_p", type=float, default=0.0, - help='Top p sampling.') - group.add_argument("--top_k", type=int, default=0, - help='Top k sampling.') - group.add_argument("--out-seq-length", type=int, default=1024, - help='Size of the output generated text.') - group.add_argument("--sample-input-file", type=str, default=None, - help='Get input from file instead of interactive mode, ' - 'each line is an input.') - group.add_argument("--sample-output-file", type=str, default=None, - help='Output file got from --sample-input-file') - group.add_argument("--num-samples", type=int, default=0, - help='Number of samples to generate unconditionally, ' - 'defaults to 0 and interactive conditional sampling') - group.add_argument("--genfile", type=str, - help='Output file when generating unconditionally') - return parser - -def generate_samples_unconditional(model): - args = get_args() - - if torch.distributed.get_rank() == 0: - cnt = 0 - num_samples = args.num_samples - from tqdm import tqdm - pbar = tqdm(total=num_samples) - - while True: - if torch.distributed.get_rank() == 0: - sentences = [''] * args.global_batch_size - print("global batch size", args.global_batch_size) - max_len = args.out_seq_length - resp_sentences, resp_sentences_seg, output_logits, \ - tokens = generate_and_post_process(model, prompts=sentences, - tokens_to_generate=max_len, - return_output_log_probs=False, - top_k_sampling=args.top_k, - top_p_sampling=args.top_p, - add_BOS=True, - temperature=1.0) - for prompt, generation, token in zip(sentences, resp_sentences, tokens): - datum = {'text': generation[len(prompt):], 'all_text': generation, 'prompt': prompt, 'id': cnt} - yield datum - cnt += 1 - pbar.update() - if cnt >= num_samples: - break - - if cnt >= num_samples: - pbar.close() - break - else: - generate_and_post_process(model) - - -def generate_samples_conditional(model): - args = get_args() - - if torch.distributed.get_rank() == 0: - num_samples = args.num_samples - cnt = 0 - from tqdm import tqdm - pbar = tqdm(total=num_samples) - - fname = open(args.sample_input_file, "r") - lines = fname.readlines() - all_raw_text = [json.loads(line)['prompt']['text'] for line in lines] - input_count = len(all_raw_text) - input_pos = 0 - - while True: - torch.distributed.barrier() - if torch.distributed.get_rank() == 0: - sentences = [] - print("global batch size", 
args.global_batch_size) - for _ in range(args.global_batch_size): - if input_pos >= input_count: - print(f"input pos: {input_pos}, input count: {input_count}") - raw_text = "EMPTY TEXT" - else: - raw_text = all_raw_text[input_pos] - input_pos += 1 - sentences.append(raw_text) - - max_len = args.out_seq_length - resp_sentences, resp_sentences_seg, output_logits, \ - tokens = generate_and_post_process(model, prompts=sentences, - tokens_to_generate=max_len, - return_output_log_probs=False, - top_k_sampling=args.top_k, - top_p_sampling=args.top_p, - add_BOS=False, - temperature=1.0) - for prompt, generation, token in zip(sentences, resp_sentences, tokens): - datum = {'text': generation[len(prompt):], 'all_text': generation, 'prompt': prompt, 'id': cnt} - yield datum - cnt += 1 - pbar.update() - if cnt >= num_samples: - break - - if cnt >= num_samples: - pbar.close() - break - else: - generate_and_post_process(model) - - -def generate_and_write_samples_unconditional(model): - args = get_args() - assert args.genfile is not None - with open(args.genfile, 'w') as f: - for datum in generate_samples_unconditional(model): - if torch.distributed.get_rank() == 0: - f.write(json.dumps(datum) + '\n') - - -def generate_and_write_samples_conditional(model): - args = get_args() - if args.sample_output_file is None: - sample_output_file = args.sample_input_file + ".out" - print('`sample-output-file` not specified, setting ' - 'it to {}'.format(sample_output_file)) - else: - sample_output_file = args.sample_output_file - with open(sample_output_file, 'w') as f: - for datum in generate_samples_conditional(model): - if torch.distributed.get_rank() == 0: - f.write(json.dumps(datum) + '\n') - - -def main(): - """Main program.""" - - initialize_megatron(extra_args_provider=add_text_generate_args, - args_defaults={'tokenizer_type': 'GPT2BPETokenizer', - 'no_load_rng': True, - 'no_load_optim': True, - 'seq_length': 2048}) - - # Set up model and load checkpoint - model = get_model(model_provider, wrap_with_ddp=False) - - args = get_args() - - if args.load is not None: - _ = load_checkpoint(model, None, None) - model = model[0] - - # Generate samples. 
- if args.sample_input_file != None: - print(f"{args.sample_input_file}") - generate_and_write_samples_conditional(model) - else: - generate_and_write_samples_unconditional(model) - - -if __name__ == "__main__": - - main() diff --git a/examples/evaluate_retriever_nq.sh b/examples/evaluate_retriever_nq.sh deleted file mode 100644 index 16e937f4fd..0000000000 --- a/examples/evaluate_retriever_nq.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Evaluate natural question test data given Wikipedia embeddings and pretrained -# ICT model or a finetuned model for Natural Question task - -# Datasets can be downloaded from the following link: -# https://github.com/facebookresearch/DPR/blob/master/data/download_data.py - -EVIDENCE_DATA_DIR= -EMBEDDING_PATH= -CHECKPOINT_PATH= - -QA_FILE= - -python tasks/main.py \ - --task RETRIEVER-EVAL \ - --tokenizer-type BertWordPieceLowerCase \ - --num-layers 12 \ - --hidden-size 768 \ - --num-attention-heads 12 \ - --tensor-model-parallel-size 1 \ - --micro-batch-size 128 \ - --activations-checkpoint-method uniform \ - --seq-length 512 \ - --max-position-embeddings 512 \ - --load ${CHECKPOINT_PATH} \ - --evidence-data-path ${EVIDENCE_DATA_DIR} \ - --embedding-path ${EMBEDDING_PATH} \ - --retriever-seq-length 256 \ - --vocab-file bert-vocab.txt\ - --qa-data-test ${QA_FILE} \ - --faiss-use-gpu \ - --retriever-report-topk-accuracies 1 5 20 100 \ - --fp16 \ - --indexer-log-interval 1000 \ - --indexer-batch-size 128 - - diff --git a/examples/evaluate_zeroshot_gpt.sh b/examples/evaluate_zeroshot_gpt.sh deleted file mode 100755 index f8c38dc01d..0000000000 --- a/examples/evaluate_zeroshot_gpt.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -WORLD_SIZE=8 - -DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ - --nnodes 1 \ - --node_rank 0 \ - --master_addr localhost \ - --master_port 6000" - -TASK="LAMBADA" - -VALID_DATA= -VOCAB_FILE=gpt2-vocab.json -MERGE_FILE=gpt2-merges.txt -CHECKPOINT=checkpoints/gpt2_345m - - -python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ - --task $TASK \ - --valid-data $VALID_DATA \ - --tokenizer-type GPT2BPETokenizer \ - --strict-lambada \ - --vocab-file $VOCAB_FILE \ - --merge-file $MERGE_FILE \ - --load $CHECKPOINT \ - --tensor-model-parallel-size 1 \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --batch-size 8 \ - --activations-checkpoint-method uniform \ - --seq-length 1024 \ - --max-position-embeddings 1024 \ - --log-interval 10 \ - --fp16 \ - --no-load-optim \ - --no-load-rng diff --git a/examples/export/README.md b/examples/export/README.md new file mode 100644 index 0000000000..bdd07da263 --- /dev/null +++ b/examples/export/README.md @@ -0,0 +1,10 @@ +# Megatron Core Export + +This module is used to export megatron core models to different inference frameworks. +Currently we support TRTLLM export . In the future we will be adding support for VLLM etc. + +## PTQ AND EXPORT +Follow the examples of [TensorRT Model Optimizer](../post_training/modelopt) to perform post training quantization, followed by an export to a HF-like checkpoint for TensorRT-LLM, vLLM, and SGLang deployment. + +# TRTLLM EXPORT +Follow the instructions in [trtllm_export](./trtllm_export/) to do export to TRTLLM checkpoint format alone. 
diff --git a/examples/export/trtllm_export/README.md b/examples/export/trtllm_export/README.md new file mode 100644 index 0000000000..52cad78583 --- /dev/null +++ b/examples/export/trtllm_export/README.md @@ -0,0 +1,161 @@ +# Megatron Core To TRTLLM Export Documentation +This guide will walk you through how to use the Megatron Core export to export models to the TRTLLM format. + +### Contents +- [Megatron Core To TRTLLM Export Documentation](#megatron-core-to-trtllm-export-documentation) +- [Contents](#contents) + - [1. Quick Start](#1-quick-start) + - [1.1 Understanding The Code](#11-understanding-the-code) + - [1.2 Running The Code](#12-running-the-code) + - [2. GPU Export](#2-gpu-export) + - [3. Future work](#3-future-work) + +#### 1. Quick Start +This will walk you through the flow of converting an mcore GPT model to the TRTLLM format using single-device mode. The file can be found at [gpt_single_device_cpu_export.py](./single_device_export/gpt_single_device_cpu_export.py). + +NOTE: For faster performance, if your entire model will fit into GPU memory, pre-transfer the model state dict to the GPU and then call the get_trtllm_pretrained_config_and_model_weights function. + +
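+
+For example, a minimal sketch of that pre-transfer (an illustration only, assuming the full state dict fits in GPU memory; `gpt_model` is the model built in the steps below):
+
+```python
+# Move weights to GPU before conversion; drop the None-valued _extra_state entries.
+model_state_dict = {k: v.cuda() for k, v in gpt_model.state_dict().items() if v is not None}
+```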
+ +##### 1.1 Understanding The Code +***STEP 1 - We initialize model parallel and other default arguments*** +We initialize TP and PP to 1 so that we can get the full model state dict on CPU. +```python + initialize_distributed(tensor_model_parallel_size=1, pipeline_model_parallel_size=1) +``` + +***STEP 2 - We load the model using the model_provider_function*** +NOTE: We create a simple GPT model. + +```python + transformer_config = TransformerConfig( + num_layers=2, + hidden_size=64, # Needs to be at least 32 times num_attn_heads + num_attention_heads=2, + use_cpu_initialization=True, + pipeline_dtype=torch.float32, + ) + + gpt_model = GPTModel( + config=transformer_config, + transformer_layer_spec=get_gpt_layer_local_spec(), + vocab_size=100, + max_sequence_length=_SEQUENCE_LENGTH, + ) + + # Optionally you can also load a model using this code + # sharded_state_dict=gpt_model.sharded_state_dict(prefix='') + # checkpoint = dist_checkpointing.load(sharded_state_dict=sharded_state_dict, checkpoint_dir=checkpoint_path) + # gpt_model.load_state_dict(checkpoint) + +``` + +***STEP 3 - Instantiate the TRTLLM Helper*** +We instantiate the [TRTLLM Helper](../../../megatron/core/export/trtllm/trtllm_helper.py). For the GPT model we instantiate trtllm_helper as shown below. +```python + if hasattr(gpt_model, "rotary_pos_emb"): + seq_len_interpolation_factor = gpt_model.rotary_pos_emb.seq_len_interpolation_factor + + trtllm_helper = TRTLLMHelper( + transformer_config=gpt_model.config, + model_type=ModelType.gpt, + position_embedding_type = gpt_model.position_embedding_type, + max_position_embeddings = gpt_model.max_position_embeddings, + rotary_percentage = gpt_model.rotary_percent, + rotary_base = gpt_model.rotary_base, + moe_tp_mode = 2, + multi_query_mode = False, + activation = "gelu", + seq_len_interpolation_factor = seq_len_interpolation_factor, + share_embeddings_and_output_weights=gpt_model.share_embeddings_and_output_weights + ) +``` + +***STEP 4 - Get the TRTLLM Weights and configs*** +To convert model weights to TRTLLM weights and configs, we use the [single_device_converter](../../../megatron/core/export/trtllm/trtllm_weights_converter/single_device_trtllm_model_weights_converter.py). We pass in the model state dict and the export config as inputs. In this example we use an inference TP size of 2 for the export. + +```python + model_state_dict={} + for key, val in gpt_model.state_dict().items(): + # val is None for _extra_state layers. We filter it out + if val is not None: + model_state_dict[key] = val + + export_config = ExportConfig(inference_tp_size = 2) + weight_list, config_list = trtllm_helper.get_trtllm_pretrained_config_and_model_weights( + model_state_dict= model_state_dict, + dtype = DataType.bfloat16, + export_config=export_config + ) +``` + +***STEP 5 - Build the TRTLLM Engine*** +The following code is used to build the TRTLLM Engine. 
+ +```python + for trtllm_model_weights, trtllm_model_config in zip(weight_list, config_list): + trtllm_helper.build_and_save_engine( + max_input_len=256, + max_output_len=256, + max_batch_size=8, + engine_dir='/opt/megatron-lm/engine', + trtllm_model_weights=trtllm_model_weights, + trtllm_model_config=trtllm_model_config, + lora_ckpt_list=None, + use_lora_plugin=None, + max_lora_rank=64, + lora_target_modules=None, + max_prompt_embedding_table_size=0, + paged_kv_cache=True, + remove_input_padding=True, + paged_context_fmha=False, + use_refit=False, + max_num_tokens=None, + max_seq_len=512, + opt_num_tokens=None, + max_beam_width=1, + tokens_per_block=128, + multiple_profiles=False, + gpt_attention_plugin="auto", + gemm_plugin="auto", + ) +``` +
+ +##### 1.2 Running The Code +An example run script is shown below. + +``` +# In a workstation +MLM_PATH=/path/to/megatron-lm +CONTAINER_IMAGE=gitlab-master.nvidia.com:5005/dl/joc/nemo-ci/trtllm_0.12/train:pipe.17669124-x86 + +docker run -it --gpus=all --ipc=host -v $MLM_PATH/:/opt/megatron-lm $CONTAINER_IMAGE bash + +# Inside the container run the following. + +cd /opt/megatron-lm/ + +CUDA_VISIBLE_DEVICES=0 torchrun --nproc-per-node 1 examples/export/trtllm_export/single_device_export/gpt_single_device_cpu_export.py +``` + +
+ +#### 2. GPU Export +You can use [gpt_distributed_gpu_export.py](./distributed_export/gpt_distributed_gpu_export.py) to run a more optimized, on-device, distributed version of the TRTLLM export. Internally this uses the [distributed_converter](../../../megatron/core/export/trtllm/trtllm_weights_converter/distributed_trtllm_model_weights_converter.py) to convert model weights on device. +In the single-device version you collect all the model weights on CPU/GPU, convert them to TRTLLM format, and then store the engine on disk. In the GPU version you load each individual state dict on the GPUs, convert it on the device itself, and store the engine on disk. + +To run the GPU version: + +``` +CUDA_VISIBLE_DEVICES=0,1 torchrun --nproc-per-node 2 examples/export/trtllm_export/distributed_export/gpt_distributed_gpu_export.py +``` + +
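+
+For reference, the key API difference from the single-device flow is that the distributed script (shown in full further below) asks for on-device conversion instead of passing an `ExportConfig`, roughly:
+
+```python
+# Excerpt from gpt_distributed_gpu_export.py: convert the weights on the device that owns them.
+trtllm_model_weights, trtllm_model_config = trtllm_helper.get_trtllm_pretrained_config_and_model_weights(
+    model_state_dict=gpt_model.state_dict(),
+    dtype=DataType.bfloat16,
+    on_device_distributed_conversion=True,
+    vocab_size=_VOCAB_SIZE,
+    gpus_per_node=2,
+)
+```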
+ +#### 3. Future work +The following are planned for the future releases . +* Pipeline parallellism for export (Work in progress) +* GPU Export for more models (Work in progress for some models) +* Refit functionality +* VLLM Support \ No newline at end of file diff --git a/examples/export/trtllm_export/distributed_export/gpt_distributed_gpu_export.py b/examples/export/trtllm_export/distributed_export/gpt_distributed_gpu_export.py new file mode 100644 index 0000000000..57d44f9f62 --- /dev/null +++ b/examples/export/trtllm_export/distributed_export/gpt_distributed_gpu_export.py @@ -0,0 +1,117 @@ +import os +import torch +from megatron.core import parallel_state +from megatron.core import dist_checkpointing +from megatron.core.export.model_type import ModelType +from megatron.core.export.data_type import DataType +from megatron.core.export.trtllm.trtllm_helper import TRTLLMHelper +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.models.gpt.gpt_model import GPTModel +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec + + +_SEQUENCE_LENGTH = 64 +_VOCAB_SIZE = 256 + +def initialize_distributed(tensor_model_parallel_size=1, pipeline_model_parallel_size=1): + parallel_state.destroy_model_parallel() + + # Torch setup for distributed training + rank = int(os.environ['LOCAL_RANK']) + world_size = torch.cuda.device_count() + torch.cuda.set_device(rank) + torch.distributed.init_process_group(world_size=world_size, rank=rank) + + # Megatron core distributed training initialization + parallel_state.initialize_model_parallel(tensor_model_parallel_size = tensor_model_parallel_size, pipeline_model_parallel_size=pipeline_model_parallel_size) + +def model_provider(): + """Build the model.""" + + transformer_config = TransformerConfig( + num_layers=2, + hidden_size=64, + num_attention_heads=2, + use_cpu_initialization=True, + pipeline_dtype=torch.float32 + ) + + gpt_model = GPTModel( + config=transformer_config, + transformer_layer_spec=get_gpt_layer_local_spec(), + vocab_size=_VOCAB_SIZE, + max_sequence_length=_SEQUENCE_LENGTH, + ) + + return gpt_model + +def load_distributed_checkpoint(checkpoint_path, gpt_model): + sharded_state_dict=gpt_model.sharded_state_dict(prefix='') + checkpoint = dist_checkpointing.load(sharded_state_dict=sharded_state_dict, checkpoint_dir=checkpoint_path) + gpt_model.load_state_dict(checkpoint) + return gpt_model + +if __name__ == "__main__": + initialize_distributed(tensor_model_parallel_size=2, pipeline_model_parallel_size=1) + model_parallel_cuda_manual_seed(123) + + gpt_model = model_provider() + device = torch.device("cuda") + gpt_model.to(device) + + # Optionally you can also load a gpt model from ckpt_path using this code below + # gpt_model = load_distributed_checkpoint(gpt_model=gpt_model, checkpoint_path=ckpt_path) + + seq_len_interpolation_factor = None + if hasattr(gpt_model, "rotary_pos_emb"): + seq_len_interpolation_factor = gpt_model.rotary_pos_emb.seq_len_interpolation_factor + + trtllm_helper = TRTLLMHelper( + transformer_config=gpt_model.config, + model_type=ModelType.gpt, + position_embedding_type = gpt_model.position_embedding_type, + max_position_embeddings = gpt_model.max_position_embeddings, + rotary_percentage = gpt_model.rotary_percent, + rotary_base = gpt_model.rotary_base, + moe_tp_mode = 2, + multi_query_mode = False, + activation = "gelu", + seq_len_interpolation_factor = 
seq_len_interpolation_factor, + share_embeddings_and_output_weights=gpt_model.share_embeddings_and_output_weights + ) + + + trtllm_model_weights, trtllm_model_config = trtllm_helper.get_trtllm_pretrained_config_and_model_weights( + model_state_dict= gpt_model.state_dict(), + dtype = DataType.bfloat16, + on_device_distributed_conversion=True, + vocab_size=_VOCAB_SIZE, + gpus_per_node=2, + ) + + trtllm_helper.build_and_save_engine( + max_input_len=256, + max_output_len=256, + max_batch_size=8, + engine_dir='/opt/megatron-lm/engine', + trtllm_model_weights=trtllm_model_weights[0], + trtllm_model_config=trtllm_model_config[0], + lora_ckpt_list=None, + use_lora_plugin=None, + max_lora_rank=64, + lora_target_modules=None, + max_prompt_embedding_table_size=0, + paged_kv_cache=True, + remove_input_padding=True, + paged_context_fmha=False, + use_refit=False, + max_num_tokens=None, + max_seq_len=512, + opt_num_tokens=None, + max_beam_width=1, + tokens_per_block=128, + multiple_profiles=False, + gpt_attention_plugin="auto", + gemm_plugin="auto", + ) diff --git a/examples/export/trtllm_export/single_device_export/gpt_single_device_cpu_export.py b/examples/export/trtllm_export/single_device_export/gpt_single_device_cpu_export.py new file mode 100644 index 0000000000..587e7cfdd3 --- /dev/null +++ b/examples/export/trtllm_export/single_device_export/gpt_single_device_cpu_export.py @@ -0,0 +1,118 @@ +import os +import torch +from megatron.core import parallel_state +from megatron.core import dist_checkpointing +from megatron.core.export.model_type import ModelType +from megatron.core.export.data_type import DataType +from megatron.core.export.export_config import ExportConfig +from megatron.core.export.trtllm.trtllm_helper import TRTLLMHelper +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.models.gpt.gpt_model import GPTModel +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec + + +_SEQUENCE_LENGTH = 64 + + +def initialize_distributed(tensor_model_parallel_size=1, pipeline_model_parallel_size=1): + parallel_state.destroy_model_parallel() + + # Torch setup for distributed training + rank = int(os.environ['LOCAL_RANK']) + world_size = torch.cuda.device_count() + torch.cuda.set_device(rank) + torch.distributed.init_process_group(world_size=world_size, rank=rank) + + # Megatron core distributed training initialization + parallel_state.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size) + +def model_provider(): + """Build the model.""" + + transformer_config = TransformerConfig( + num_layers=2, + hidden_size=64, # Needs to be atleast 32 times num_attn_heads + num_attention_heads=2, + use_cpu_initialization=True, + pipeline_dtype=torch.float32, + ) + + gpt_model = GPTModel( + config=transformer_config, + transformer_layer_spec=get_gpt_layer_local_spec(), + vocab_size=100, + max_sequence_length=_SEQUENCE_LENGTH, + ) + + return gpt_model + +def load_distributed_checkpoint(checkpoint_path, gpt_model): + sharded_state_dict=gpt_model.sharded_state_dict(prefix='') + checkpoint = dist_checkpointing.load(sharded_state_dict=sharded_state_dict, checkpoint_dir=checkpoint_path) + gpt_model.load_state_dict(checkpoint) + return gpt_model + +if __name__ == "__main__": + # Need to use TP1 PP1 for export on single device + initialize_distributed(tensor_model_parallel_size=1, pipeline_model_parallel_size=1) + 
model_parallel_cuda_manual_seed(123) + + gpt_model = model_provider() + + # Optionally you can also load a gpt model from ckpt_path using this code below + # gpt_model = load_distributed_checkpoint(gpt_model=gpt_model, checkpoint_path=ckpt_path) + + seq_len_interpolation_factor = None + if hasattr(gpt_model, "rotary_pos_emb"): + seq_len_interpolation_factor = gpt_model.rotary_pos_emb.seq_len_interpolation_factor + + trtllm_helper = TRTLLMHelper( + transformer_config=gpt_model.config, + model_type=ModelType.gpt, + position_embedding_type = gpt_model.position_embedding_type, + max_position_embeddings = gpt_model.max_position_embeddings, + rotary_percentage = gpt_model.rotary_percent, + rotary_base = gpt_model.rotary_base, + moe_tp_mode = 2, + multi_query_mode = False, + activation = "gelu", + seq_len_interpolation_factor = seq_len_interpolation_factor, + share_embeddings_and_output_weights=gpt_model.share_embeddings_and_output_weights + ) + + + export_config = ExportConfig(inference_tp_size = 2) + # NOTE : For faster performance, if your entire model will fit in gpu memory, transfer model state dict to GPU and then call this api + weight_list, config_list = trtllm_helper.get_trtllm_pretrained_config_and_model_weights( + model_state_dict= gpt_model.state_dict(), + dtype = DataType.bfloat16, + export_config=export_config + ) + + for trtllm_model_weights, trtllm_model_config in zip(weight_list, config_list): + trtllm_helper.build_and_save_engine( + max_input_len=256, + max_output_len=256, + max_batch_size=8, + engine_dir='/opt/megatron-lm/engine', + trtllm_model_weights=trtllm_model_weights, + trtllm_model_config=trtllm_model_config, + lora_ckpt_list=None, + use_lora_plugin=None, + max_lora_rank=64, + lora_target_modules=None, + max_prompt_embedding_table_size=0, + paged_kv_cache=True, + remove_input_padding=True, + paged_context_fmha=False, + use_refit=False, + max_num_tokens=None, + max_seq_len=512, + opt_num_tokens=None, + max_beam_width=1, + tokens_per_block=128, + multiple_profiles=False, + gpt_attention_plugin="auto", + gemm_plugin="auto", + ) \ No newline at end of file diff --git a/examples/finetune_mnli_distributed.sh b/examples/finetune_mnli_distributed.sh deleted file mode 100755 index 9219e595dd..0000000000 --- a/examples/finetune_mnli_distributed.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -WORLD_SIZE=8 - -DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ - --nnodes 1 \ - --node_rank 0 \ - --master_addr localhost \ - --master_port 6000" - -TRAIN_DATA="data/glue_data/MNLI/train.tsv" -VALID_DATA="data/glue_data/MNLI/dev_matched.tsv \ - data/glue_data/MNLI/dev_mismatched.tsv" -PRETRAINED_CHECKPOINT=checkpoints/bert_345m -VOCAB_FILE=bert-vocab.txt -CHECKPOINT_PATH=checkpoints/bert_345m_mnli - -python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ - --task MNLI \ - --seed 1234 \ - --train-data $TRAIN_DATA \ - --valid-data $VALID_DATA \ - --tokenizer-type BertWordPieceLowerCase \ - --vocab-file $VOCAB_FILE \ - --epochs 5 \ - --pretrained-checkpoint $PRETRAINED_CHECKPOINT \ - --tensor-model-parallel-size 1 \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --micro-batch-size 8 \ - --activations-checkpoint-method uniform \ - --lr 5.0e-5 \ - --lr-decay-style linear \ - --lr-warmup-fraction 0.065 \ - --seq-length 512 \ - --max-position-embeddings 512 \ - --save-interval 500000 \ - --save $CHECKPOINT_PATH \ - --log-interval 10 \ - --eval-interval 100 \ - --eval-iters 50 \ - --weight-decay 1.0e-1 \ - --fp16 diff --git 
a/examples/finetune_race_distributed.sh b/examples/finetune_race_distributed.sh deleted file mode 100755 index e7f70a70ab..0000000000 --- a/examples/finetune_race_distributed.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -WORLD_SIZE=8 - -DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ - --nnodes 1 \ - --node_rank 0 \ - --master_addr localhost \ - --master_port 6000" - -TRAIN_DATA="data/RACE/train/middle" -VALID_DATA="data/RACE/dev/middle \ - data/RACE/dev/high" -VOCAB_FILE=bert-vocab.txt -PRETRAINED_CHECKPOINT=checkpoints/bert_345m -CHECKPOINT_PATH=checkpoints/bert_345m_race - -python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ - --task RACE \ - --seed 1234 \ - --train-data $TRAIN_DATA \ - --valid-data $VALID_DATA \ - --tokenizer-type BertWordPieceLowerCase \ - --vocab-file $VOCAB_FILE \ - --epochs 3 \ - --pretrained-checkpoint $PRETRAINED_CHECKPOINT \ - --tensor-model-parallel-size 1 \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --micro-batch-size 4 \ - --activations-checkpoint-method uniform \ - --lr 1.0e-5 \ - --lr-decay-style linear \ - --lr-warmup-fraction 0.06 \ - --seq-length 512 \ - --max-position-embeddings 512 \ - --save-interval 100000 \ - --save $CHECKPOINT_PATH \ - --log-interval 10 \ - --eval-interval 100 \ - --eval-iters 50 \ - --weight-decay 1.0e-1 \ - --clip-grad 1.0 \ - --hidden-dropout 0.1 \ - --attention-dropout 0.1 \ - --fp16 diff --git a/examples/finetune_retriever_distributed.sh b/examples/finetune_retriever_distributed.sh deleted file mode 100755 index 535a2e053d..0000000000 --- a/examples/finetune_retriever_distributed.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -# Finetune a BERT or pretrained ICT model using Google natural question data -# Datasets can be downloaded from the following link: -# https://github.com/facebookresearch/DPR/blob/master/data/download_data.py - -WORLD_SIZE=8 - -DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ - --nnodes 1 \ - --node_rank 0 \ - --master_addr localhost \ - --master_port 6000" - -CHECKPOINT_PATH= - -# Load either of the below -BERT_LOAD_PATH= -PRETRAINED_CHECKPOINT= - -python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ - --task RET-FINETUNE-NQ \ - --train-with-neg \ - --train-hard-neg 1 \ - --pretrained-checkpoint ${PRETRAINED_CHECKPOINT} \ - --num-layers 12 \ - --hidden-size 768 \ - --num-attention-heads 12 \ - --tensor-model-parallel-size 1 \ - --tokenizer-type BertWordPieceLowerCase \ - --train-data nq-train.json \ - --valid-data nq-dev.json \ - --save ${CHECKPOINT_PATH} \ - --load ${CHECKPOINT_PATH} \ - --vocab-file bert-vocab.txt \ - --bert-load ${BERT_LOAD_PATH} \ - --save-interval 5000 \ - --log-interval 10 \ - --eval-interval 20000 \ - --eval-iters 100 \ - --indexer-log-interval 1000 \ - --faiss-use-gpu \ - --DDP-impl torch \ - --fp16 \ - --retriever-report-topk-accuracies 1 5 10 20 100 \ - --seq-length 512 \ - --retriever-seq-length 256 \ - --max-position-embeddings 512 \ - --retriever-score-scaling \ - --epochs 80 \ - --micro-batch-size 8 \ - --eval-micro-batch-size 16 \ - --indexer-batch-size 128 \ - --lr 2e-5 \ - --lr-warmup-fraction 0.01 \ - --weight-decay 1e-1 diff --git a/examples/gpt3/README.md b/examples/gpt3/README.md new file mode 100644 index 0000000000..8d6f267416 --- /dev/null +++ b/examples/gpt3/README.md @@ -0,0 +1,57 @@ +# GPT3 MODEL + +## Table of contents +- [1. Training Setup](#1-training-setup) +- [2. Configurations](#2-configurations) +- [3. Training Results](#3-training-results) + +## 1. 
Training setup + + +To run the model using a docker container run it as follows +``` +PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:24.01-py3 +CHECKPOINT_PATH="" # +TENSORBOARD_LOGS_PATH=""# +VOCAB_FILE="" #/gpt2-vocab.json +MERGE_FILE="" #/gpt2-merges.txt +DATA_PATH="" #_text_document + +docker run \ + --gpus=all \ + --ipc=host \ + --workdir /workspace/megatron-lm \ + -v /path/to/data:/path/to/data \ + -v /path/to/megatron-lm:/workspace/megatron-lm \ + megatron-lm nvcr.io/nvidia/pytorch:24.01-py3 \ + bash examples/gpt3/train_gpt3_175b_distributed.sh $CHECKPOINT_PATH $TENSORBOARD_LOGS_PATH $VOCAB_FILE $MERGE_FILE $DATA_PATH " + +``` +NOTE: Depending on the environment you are running it the above command might like slightly different. + + +## 2. Configurations + +The example in this folder shows you how to run 175B model. There are other configs you could run as well + +### 345M +``` + --num-layers 12 \ + --hidden-size 512 \ + --num-attention-heads 8 \ + --seq-length 1024 \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + +``` + +### 857M +``` + --num-layers 24 \ + --hidden-size 1024 \ + --num-attention-heads 16 \ + --seq-length 2048 \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + +``` diff --git a/examples/gpt3/gpt_config.yaml b/examples/gpt3/gpt_config.yaml new file mode 100644 index 0000000000..2fd40e6214 --- /dev/null +++ b/examples/gpt3/gpt_config.yaml @@ -0,0 +1,300 @@ +# WARNING: Yaml configs is currently an experimental feature +language_model: + # model architecture + num_layers: 24 + hidden_size: 1024 + num_attention_heads: 16 + num_query_groups: null + + ffn_hidden_size: null + kv_channels: null + hidden_dropout: 0.0 + attention_dropout: 0.0 + fp32_residual_connection: False + + apply_residual_connection_post_layernorm: False + layernorm_epsilon: 1.e-5 + layernorm_zero_centered_gamma: True + add_bias_linear: False + bias_activation_fusion: False + add_qkv_bias: False + gated_linear_unit: False + activation_func: swiglu + num_moe_experts: null + rotary_interleaved: False + window_size: null + + # initialization + init_method: null + init_method_std: 0.02 + output_layer_init_method: null + + # mixed-precision + apply_query_key_layer_scaling: False + attention_softmax_in_fp32: False + + # fusion + bias_swiglu_fusion: True + masked_softmax_fusion: True + persist_layer_norm: False + memory_efficient_layer_norm: False + bias_dropout_fusion: True + apply_rope_fusion: True + + # activation recomputation + recompute_granularity: null + recompute_method: null + recompute_num_layers: null + distribute_saved_activations: null + + # fp8 related + fp8: null + fp8_margin: 0 + fp8_interval: 1 + fp8_amax_history_len: 1 + fp8_amax_compute_algo: "most_recent" + fp8_wgrad: True + + # miscellaneous + clone_scatter_output_in_embedding: True + + normalization: "LayerNorm" # alt value supported by TE: "RMSNorm" + + # MoE related + moe_router_load_balancing_type: "aux_loss" + moe_router_topk: 2 + moe_router_group_topk: null + moe_router_num_groups: null + moe_grouped_gemm: False + moe_aux_loss_coeff: 0 # 1e-2 would be a good start value for load balance loss. 
+ moe_z_loss_coeff: null # 1e-3 would be a good start value for z-loss + moe_input_jitter_eps: null + moe_token_dropping: False + +model_parallel: + # Model parallelism + tensor_model_parallel_size: 1 + context_parallel_size: 1 + pipeline_model_parallel_size: 1 + virtual_pipeline_model_parallel_size: null + sequence_parallel: True + expert_model_parallel_size: 1 + + # Initialization + perform_initialization: True + use_cpu_initialization: null + + # Training + fp16: False + bf16: True + params_dtype: null # Set from above arguments for core + timers: null + + # Optimizations + gradient_accumulation_fusion: True + async_tensor_model_parallel_allreduce: True + tp_comm_overlap: False + + # Debug Options + tp_comm_split_ag: True + tp_comm_atomic_ag: True + tp_comm_split_rs: True + tp_comm_atomic_rs: True + tp_comm_bulk_wgrad: True + tp_comm_bulk_dgrad: True + + # Parallelism + finalize_model_grads_func: null + + # Pipeline Parallel + pipeline_dtype: null + grad_scale_func: null + enable_autocast: False + autocast_dtype: null + variable_seq_lengths: False + num_microbatches_with_partial_activation_checkpoints: null + overlap_p2p_comm: False + batch_p2p_comm: True + batch_p2p_sync: True + use_ring_exchange_p2p: False + deallocate_pipeline_outputs: False + no_sync_func: null + grad_sync_func: null + param_sync_func: null + + # CPU Offloading + cpu_offloading: False + cpu_offloading_num_layers: 0 + _cpu_offloading_context: null + cpu_offloading_weights: False + cpu_offloading_activations: True + + # Timing + barrier_with_L1_time: True + +# training: +use_legacy_models: False +spec: null +micro_batch_size: 2 +global_batch_size: 128 +rampup_batch_size: [32, 32, 65324160] +check_for_nan_in_loss_and_grad: True +num_layers_per_virtual_pipeline_stage: null + +encoder_num_layers: null +decoder_num_layers: null +rotary_seq_len_interpolation_factor: null +add_position_embedding: False +make_vocab_size_divisible_by: 128 +group_query_attention: False + + +exit_signal_handler: False +exit_duration_in_mins: null +exit_interval: null + +untie_embeddings_and_output_weights: True +position_embedding_type: rope +rotary_percent: 0.5 +openai_gelu: False +squared_relu: False +swiglu: True +onnx_safe: null +bert_binary_head: True +max_position_embeddings: 4096 + +transformer_impl: local +use_flash_attn: False +seed: 1234 +data_parallel_random_init: False + +# Optimizer +optimizer: adam +lr: 2.5e-4 +lr_decay_style: cosine +lr_decay_iters: null +lr_decay_samples: 255126953 +lr_warmup_fraction: null +lr_warmup_iters: 0 +lr_warmup_samples: 81381 +lr_warmup_init: 0.0 +min_lr: 2.5e-5 +weight_decay: 0.1 +start_weight_decay: null +end_weight_decay: null +weight_decay_incr_style: constant +clip_grad: 1.0 +adam_beta1: 0.9 +adam_beta2: 0.95 +adam_eps: 1.e-08 +sgd_momentum: 0.9 +override_opt_param_scheduler: False +use_checkpoint_opt_param_scheduler: False + +# checkpointing arguments +save: null +save_interval: 20000 +no_save_optim: null +no_save_rng: null +load: null +no_load_optim: null +no_load_rng: null +finetune: False +use_checkpoint_args: False +exit_on_missing_checkpoint: False + +# loss arguments +loss_scale: null +initial_loss_scale: 4294967296 +min_loss_scale: 1.0 +loss_scale_window: 1000 +hysteresis: 2 +accumulate_allreduce_grads_in_fp32: False +fp16_lm_cross_entropy: False + +# distributed arguments +distributed_backend: nccl +distributed_timeout_minutes: 10 +overlap_grad_reduce: False +align_grad_reduce: True +overlap_param_gather: False +align_param_gather: False +scatter_gather_tensors_in_pipeline: True 
+local_rank: null +lazy_mpu_init: null +empty_unused_memory_level: 0 +standalone_embedding_stage: False +use_distributed_optimizer: False +nccl_communicator_config_path: null + +train_iters: null +eval_iters: 32 +eval_interval: 2000 +skip_train: False + +adlr_autoresume: False +adlr_autoresume_interval: 1000 + +# garbage collection +manual_gc: False +manual_gc_interval: 0 +manual_gc_eval: True + +tp_comm_overlap_cfg: null + +#data +data_path: null +split: '99,1,0' +train_data_path: null +valid_data_path: null +test_data_path: null +data_cache_path: null +mock_data: False +vocab_size: null +vocab_file: null +merge_file: null +vocab_extra_ids: 0 +seq_length: 4096 +encoder_seq_length: null +decoder_seq_length: null +retriever_seq_length: 256 +sample_rate: 1.0 +mask_prob: 0.15 +short_seq_prob: 0.1 +num_workers: 2 +tokenizer_type: GPTSentencePieceTokenizer +tokenizer_model: null +reset_position_ids: False +reset_attention_mask: False +eod_mask_loss: False +train_samples: 268554688 +dataloader_type: null + +#profile: +profile: False +profile_ranks: [0] +profile_step_end: 12 +profile_step_start: 10 + +#logging: +log_params_norm: True +log_num_zeros_in_grad: True +log_throughput: False +log_progress: False +timing_log_level: 0 +timing_log_option: minmax +tensorboard_log_interval: 1 +tensorboard_queue_size: 1000 +log_timers_to_tensorboard: False +log_validation_ppl_to_tensorboard: False +log_memory_to_tensorboard: False +log_world_size_to_tensorboard: False +log_loss_scale_to_tensorboard: True +wandb_project: '' +wandb_exp_name: '' +wandb_save_dir: '' +enable_one_logger: True +one_logger_project: megatron-lm +one_logger_run_name: null +log_interval: 100 +tensorboard_dir: null diff --git a/examples/gpt3/train_gpt3_175b_distributed.sh b/examples/gpt3/train_gpt3_175b_distributed.sh new file mode 100755 index 0000000000..7d2c01b315 --- /dev/null +++ b/examples/gpt3/train_gpt3_175b_distributed.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# Runs the "175B" parameter model + +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NUM_NODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) + +CHECKPOINT_PATH=$1 # +TENSORBOARD_LOGS_PATH=$2 # +VOCAB_FILE=$3 #/gpt2-vocab.json +MERGE_FILE=$4 #/gpt2-merges.txt +DATA_PATH=$5 #_text_document + +DISTRIBUTED_ARGS=( + --nproc_per_node $GPUS_PER_NODE + --nnodes $NUM_NODES + --master_addr $MASTER_ADDR + --master_port $MASTER_PORT +) + +GPT_MODEL_ARGS=( + --num-layers 96 + --hidden-size 12288 + --num-attention-heads 96 + --seq-length 2048 + --max-position-embeddings 2048 + --attention-backend auto # Can use (flash/fused/unfused/local) +) + +TRAINING_ARGS=( + --micro-batch-size 1 + --global-batch-size 1536 + --rampup-batch-size 16 16 5859375 + --train-iters 500000 + --weight-decay 0.1 + --adam-beta1 0.9 + --adam-beta2 0.95 + --init-method-std 0.006 + --clip-grad 1.0 + --fp16 + --lr 6.0e-5 + --lr-decay-style cosine + --min-lr 6.0e-6 + --lr-warmup-fraction .001 + --lr-decay-iters 430000 +) + +MODEL_PARALLEL_ARGS=( + --tensor-model-parallel-size 8 + --pipeline-model-parallel-size 16 +) + +DATA_ARGS=( + --data-path $DATA_PATH + --vocab-file $VOCAB_FILE + --merge-file $MERGE_FILE + --split 949,50,1 +) + +EVAL_AND_LOGGING_ARGS=( + --log-interval 100 + --save-interval 10000 + --eval-interval 1000 + --save $CHECKPOINT_PATH + --load $CHECKPOINT_PATH + --eval-iters 10 + --tensorboard-dir $TENSORBOARD_LOGS_PATH +) + +torchrun ${DISTRIBUTED_ARGS[@]} pretrain_gpt.py \ + ${GPT_MODEL_ARGS[@]} \ + 
${TRAINING_ARGS[@]} \ + ${MODEL_PARALLEL_ARGS[@]} \ + ${DATA_ARGS[@]} \ + ${EVAL_AND_LOGGING_ARGS[@]} diff --git a/examples/inference/README.md b/examples/inference/README.md new file mode 100644 index 0000000000..7bba32868f --- /dev/null +++ b/examples/inference/README.md @@ -0,0 +1,289 @@ +### Megatron Core Inference Documentation +This guide provides an example for Megatron Core for running model inference. + +### Contents +- [Megatron Core Inference Documentation](#megatron-core-inference-documentation) +- [Contents](#contents) + - [1. Quick Start](#1-quick-start) + - [1.1 Understanding The Code](#11-understanding-the-code) + - [1.2 Running The Code](#12-running-the-code) + - [2. Flow of Control In MCore Backend](#2-flow-of-control-in-mcore-backend) + - [3. Customizing The Inference Pipeline](#3-customizing-the-inference-pipeline) + - [3.1. Create Your Own Inference Backend](#31-create-your-own-inference-backend) + - [3.2. Create Your Own Text Generation Controller](#32-create-your-own-text-generation-controller) + - [3.3. Support Other Models](#33-support-other-models) + - [3.3. Modify Inference Parameters](#33-modify-inference-parameters) + - [4. Future work](#4-future-work) + +
+
+#### 1. Quick Start
+This example runs statically batched inference on a model trained with Megatron Core. The entry point is [gpt_static_inference.py](./gpt/gpt_static_inference.py); a similar workflow can be adapted for [gpt_dynamic_inference.py](./gpt/gpt_dynamic_inference.py).
+
+
+##### 1.1 Understanding The Code
+***STEP 1 - Initialize model parallel and other default arguments***
+The micro batch size defaults to 1. It is not used when running with tensor parallelism only, and for pipeline-parallel models it is calculated at runtime.
+```python
+    # Initialize Megatron (model parallel state and default arguments).
+    initialize_megatron(
+        args_defaults={'no_load_rng': True, 'no_load_optim': True, 'micro_batch_size': 1}
+    )
+```
+
+***STEP 2 - Load the model using the model_provider function***
+The model provider function supports both MCore and legacy models.
+
+```python
+    # Load the model checkpoint.
+    model = get_model(model_provider, wrap_with_ddp=False)
+    load_checkpoint(model, None, None)
+    model.eval()
+    model = model[0]
+```
+
+***STEP 3 - Choose an engine***
+Text generation requires an inference engine, which includes a scheduler. The default engine is the [Megatron Core engine](../../megatron/core/inference/engines/mcore_engine.py) with a [text generation controller](../../megatron/core/inference/text_generation_controllers/text_generation_controller.py). TRTLLMEngine will be supported in the future.
+```python
+    # Create an inference wrapper to set up the model.
+    inference_wrapped_model = GPTInferenceWrapper(model, args)
+
+    # Define a sampling loop.
+    text_generation_controller = TextGenerationController(
+        inference_wrapped_model=inference_wrapped_model,
+        tokenizer=tokenizer
+    )
+
+    # Create a static or dynamic inference engine.
+    inference_engine = StaticInferenceEngine(
+        text_generation_controller=text_generation_controller,
+        max_batch_size=args.max_batch_size
+    )
+```
+
+***STEP 4 - Run text generation***
+The [SamplingParams](../../megatron/core/inference/sampling_params.py) class uses suggested defaults. Customize it to change top_p, top_k, the number of tokens to generate, etc. The result is returned as a list of [InferenceRequests](../../megatron/core/inference/inference_request.py).
+```python
+    results: List[InferenceRequest] = inference_engine.generate(
+        prompts=args.prompts, sampling_params=sampling_params
+    )
+
+    if torch.distributed.get_rank() == 0:
+        for idx, result in enumerate(results):
+            print(f' ------------- RESULT FOR PROMPT {idx} --------------- ')
+            result = {
+                'id': result.request_id,
+                'input_prompt': result.prompt,
+                'generated_text': result.generated_text,
+                'generated_tokens': result.generated_tokens
+            }
+            print(result)
+```
+
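+Putting the four steps together, a condensed end-to-end sketch might look like the following. It is illustrative only: it assumes the imports and argument names used by [gpt_static_inference.py](./gpt/gpt_static_inference.py) (for example `args.prompts`, `args.max_batch_size`, and `args.num_tokens_to_generate`) and omits the error handling and extra options of the real script.
+
+```python
+# Illustrative sketch assembled from Steps 1-4 above; not the verbatim
+# contents of gpt_static_inference.py.
+from typing import List
+
+import torch
+
+from megatron.core.inference.engines import StaticInferenceEngine
+from megatron.core.inference.inference_request import InferenceRequest
+from megatron.core.inference.model_inference_wrappers.gpt.gpt_inference_wrapper import (
+    GPTInferenceWrapper,
+)
+from megatron.core.inference.sampling_params import SamplingParams
+from megatron.core.inference.text_generation_controllers.text_generation_controller import (
+    TextGenerationController,
+)
+from megatron.training import get_args, get_model, get_tokenizer, initialize_megatron
+from megatron.training.checkpointing import load_checkpoint
+from pretrain_gpt import model_provider
+
+
+def main() -> None:
+    # Step 1: initialize Megatron with inference-friendly defaults.
+    initialize_megatron(
+        args_defaults={'no_load_rng': True, 'no_load_optim': True, 'micro_batch_size': 1}
+    )
+    args = get_args()
+    tokenizer = get_tokenizer()
+
+    # Step 2: build the model and load the checkpoint.
+    model = get_model(model_provider, wrap_with_ddp=False)
+    load_checkpoint(model, None, None)
+    model = model[0]
+    model.eval()
+
+    # Step 3: wrap the model, then build the controller and the static engine.
+    inference_wrapped_model = GPTInferenceWrapper(model, args)
+    controller = TextGenerationController(
+        inference_wrapped_model=inference_wrapped_model, tokenizer=tokenizer
+    )
+    engine = StaticInferenceEngine(
+        text_generation_controller=controller, max_batch_size=args.max_batch_size
+    )
+
+    # Step 4: generate, then print results on rank 0 only.
+    sampling_params = SamplingParams(num_tokens_to_generate=args.num_tokens_to_generate)
+    results: List[InferenceRequest] = engine.generate(
+        prompts=args.prompts, sampling_params=sampling_params
+    )
+    if torch.distributed.get_rank() == 0:
+        for result in results:
+            print(result.generated_text)
+
+
+if __name__ == "__main__":
+    main()
+```
+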
+ +##### 1.2 Running The Code +An example Slurm script is shown below. Set the tokenizer paths, inference params, and other settings appropriately. + +For a recap on sampling parameters, refer to [this blog](https://ivibudh.medium.com/a-guide-to-controlling-llm-model-output-exploring-top-k-top-p-and-temperature-parameters-ed6a31313910). + +``` +# Slurm cluster settings +ACCOUNT= +MLM_PATH=/path/to/megatron-lm +GPT_CKPT=/path/to/gpt/ckpt +VOCAB_MERGE_FILE_PATH=/path/to/vocab/and/merge/file +CONTAINER_IMAGE=nvcr.io/ea-bignlp/ga-participants/nemofw-training:23.11 + +srun --account $ACCOUNT \ +--job-name=$ACCOUNT:inference \ +--partition=batch \ +--time=01:00:00 \ +--container-image $CONTAINER_IMAGE \ +--container-mounts $MLM_PATH:/workspace/megatron-lm/,$GPT_CKPT:/workspace/mcore_gpt_ckpt,$VOCAB_MERGE_FILE_PATH:/workspace/tokenizer \ +--no-container-mount-home \ +--pty /bin/bash \ + +# Inside the container run the following. + +cd megatron-lm/ +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +TOKENIZER_ARGS=( + --vocab-file /workspace/tokenizer/gpt2-vocab.json + --merge-file /workspace/tokenizer/gpt2-merges.txt + --tokenizer-type GPT2BPETokenizer +) + +MODEL_ARGS=( + --use-checkpoint-args + --use-mcore-models + --load /workspace/mcore_gpt_ckpt +) + +INFERENCE_SPECIFIC_ARGS=( + --attention-dropout 0.0 + --hidden-dropout 0.0 + --num-tokens-to-generate 20 + --max-batch-size 4 +) + +torchrun --nproc-per-node=4 examples/inference/gpt/gpt_static_inference.py \ + ${TOKENIZER_ARGS[@]} \ + ${MODEL_ARGS[@]} \ + ${INFERENCE_SPECIFIC_ARGS[@]} \ + --prompts "prompt one " "sample prompt two" "sample prompt 3" + +NOTE: Other parameters which can be customized for inference: +--temperature (Sampling temperature) +--top_k (top_k sampling) +--top_p (top_p sampling) +--num-tokens-to-generate (Number of tokens to generate for each prompt) +--inference-batch-times-seqlen-threshold (During inference, if batch-size times sequence-length is smaller than this threshold then we will not use microbatched pipelining.') +--use-dist-ckpt (If using dist checkpoint format for the model) +--use-legacy-models (If using legacy models instead of MCore models) + +``` + + +
+
+
+#### 2. Flow of Control In MCore Backend
+An example of inference with static batching is provided in [gpt_static_inference.py](./gpt/gpt_static_inference.py).
+* The [mcore_engine](../../megatron/core/inference/engines/mcore_engine.py) **generate()** function is called with the input prompts.
+* The `Scheduler` in the engine adds these prompts to the [active requests pool](../../megatron/core/inference/inference_request.py) until the max batch size is hit. Remaining requests are added to the waiting requests pool.
+* The engine runs until all requests (waiting + active) are completed.
+    * The active requests are passed into **generate_all_output_tokens_static_batch()** of the text generation controller.
+    * This function uses the **prep_model_for_inference()** method of the [model_inference_wrappers](../../megatron/core/inference/model_inference_wrappers/abstract_model_inference_wrapper.py) and runs an autoregressive sampling loop.
+    * In the autoregressive loop, the **get_batch_for_context_window()** method of the inference wrapper is called to slice out the input tokens and masks.
+    * The input tokens and masks are passed into the **run_one_forward_step()** method, which calls the model's `forward()` method to get the output logits.
+    * The output logits are synchronized across all pipeline parallel ranks.
+    * The text generation controller obtains the log probabilities and samples tokens based on the strategy defined in the sampling parameters.
+    * The sampled tokens are then appended to the input prompt tokens for the next iteration.
+    * The **update_generation_status()** method of the text generation controller checks which prompts have finished generating or hit a stop condition.
+    * After the inference loop, the result is detokenized and stored as an attribute of the InferenceRequest. These requests are marked as completed.
+    * The **update_requests_pool()** method of the scheduler moves completed requests into the completed request pool and waiting requests into the active request pool.
+
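+To make the pool mechanics above concrete, here is a small, self-contained toy that mimics the static-batching flow with plain Python objects and a fake sampling step. None of these classes exist in Megatron Core; the real logic lives in the engine, scheduler, and text generation controller linked above.
+
+```python
+# Toy illustration of the static-batching flow described above (invented names).
+import random
+from collections import OrderedDict
+from dataclasses import dataclass, field
+from typing import List
+
+
+@dataclass
+class ToyRequest:
+    request_id: int
+    prompt_tokens: List[int]
+    generated_tokens: List[int] = field(default_factory=list)
+    completed: bool = False
+
+
+class ToyScheduler:
+    """Keeps an active pool capped at max_batch_size plus a waiting pool."""
+
+    def __init__(self, max_batch_size: int):
+        self.max_batch_size = max_batch_size
+        self.waiting: List[ToyRequest] = []
+        self.active: "OrderedDict[int, ToyRequest]" = OrderedDict()
+        self.completed: "OrderedDict[int, ToyRequest]" = OrderedDict()
+
+    def add_request(self, request: ToyRequest) -> None:
+        # Fill the active pool up to the max batch size; the rest wait.
+        if len(self.active) < self.max_batch_size:
+            self.active[request.request_id] = request
+        else:
+            self.waiting.append(request)
+
+    def update_requests_pool(self) -> None:
+        # Retire finished requests and promote waiting requests.
+        for rid in [rid for rid, req in self.active.items() if req.completed]:
+            self.completed[rid] = self.active.pop(rid)
+        while self.waiting and len(self.active) < self.max_batch_size:
+            req = self.waiting.pop(0)
+            self.active[req.request_id] = req
+
+    def has_unfinished_requests(self) -> bool:
+        return bool(self.active) or bool(self.waiting)
+
+
+def generate_static_batch(requests, eod: int = 0, max_new_tokens: int = 8) -> None:
+    # Stand-in for generate_all_output_tokens_static_batch(): keep "sampling"
+    # until every request in the batch hits EOD or the token budget.
+    while not all(req.completed for req in requests.values()):
+        for req in requests.values():
+            if req.completed:
+                continue
+            token = random.randint(0, 50)  # fake sampling step
+            req.generated_tokens.append(token)
+            if token == eod or len(req.generated_tokens) >= max_new_tokens:
+                req.completed = True
+
+
+if __name__ == "__main__":
+    scheduler = ToyScheduler(max_batch_size=2)
+    for i in range(5):
+        scheduler.add_request(ToyRequest(request_id=i, prompt_tokens=[1, 2, 3]))
+    while scheduler.has_unfinished_requests():
+        generate_static_batch(scheduler.active)
+        scheduler.update_requests_pool()
+    print({rid: len(req.generated_tokens) for rid, req in scheduler.completed.items()})
+```
+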
+
+#### 3. Customizing The Inference Pipeline
+
+The inference pipeline can be customized at the following levels:
+
+* **Inference engine** - The MCore engine supports static and dynamic batching. Modify this to add a new backend.
+* **Text generation controller** - The main sampling loop. Customize this to support alternative tokenization or implement a new sampling strategy.
+* **Inference wrapped model** - Change this to support a new model.
+* **Inference parameters** - Change these to update top_p, top_k, the number of tokens to generate, temperature, and other sampling parameters.
+
+
+##### 3.1. Create Your Own Inference Backend
+The [abstract_engine.py](./../../megatron/core/inference/engines/abstract_engine.py) file defines a `generate` method that you can implement to support a new backend.
+
+```python
+class AbstractEngine(ABC):
+    @staticmethod
+    def generate(self) -> dict:
+        """The abstract backend's generate function.
+
+        To define a new backend, implement this method and return the outputs as a dictionary.
+        """
+```
+
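+As a sketch, a new backend can subclass `AbstractEngine` and return its outputs as a dictionary. The `EchoEngine` below is hypothetical (it fakes generation by echoing each prompt back) and assumes the module path linked above; a real backend would tokenize, run a model, and detokenize here.
+
+```python
+# Hypothetical sketch of a custom backend; `EchoEngine` is not part of Megatron Core.
+from typing import Dict, List
+
+from megatron.core.inference.engines.abstract_engine import AbstractEngine
+
+
+class EchoEngine(AbstractEngine):
+    """Trivial backend that 'generates' by echoing each prompt back."""
+
+    def __init__(self, suffix: str = " ...echo"):
+        self.suffix = suffix
+
+    def generate(self, prompts: List[str]) -> Dict[str, List[str]]:
+        # Return the outputs as a dictionary, as the abstract interface expects.
+        return {"generated_texts": [prompt + self.suffix for prompt in prompts]}
+```
+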
+
+##### 3.2. Create Your Own Text Generation Controller
+
+The [TextGenerationController](../../megatron/core/inference/text_generation_controllers/text_generation_controller.py) contains the main sampling loop and can be modified to support new tokenization or detokenization schemes, or a new sampling strategy.
+
+```python
+class TextGenerationController:
+
+    def tokenize_prompt(self, prompt: str) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Utility to tokenize the input prompts."""
+
+    def sample_from_logits(
+        self,
+        last_token_logits: torch.Tensor,
+        sampling_params: SamplingParams,
+        vocab_size: int,
+        generation_started: Optional[torch.Tensor] = None,
+        top_n_logprobs_dict: Dict[int, List[Dict[str, float]]] = None,
+    ) -> torch.Tensor:
+        """Samples the logits to generate outputs.
+
+        Given the logits of the last token, this function samples according to the
+        parameters defined in sampling_params and returns the sampled tokens. If
+        sampling_params.top_n_logprobs > 0, it also updates top_n_logprobs_dict at each step.
+        """
+
+    def update_generation_status(
+        self,
+        updated_prompts_tokens: torch.Tensor,
+        generation_started: torch.Tensor,
+        current_context_end_position: int,
+        is_generation_done_tensor: torch.Tensor,
+        generated_sequence_lengths: torch.Tensor,
+    ) -> torch.Tensor:
+        """Checks which prompts have reached an end condition.
+
+        Prompts that have reached an end condition have their flag in
+        is_generation_done_tensor set to True. The generated sequence length of each
+        prompt keeps increasing until that prompt hits an end-of-document condition.
+        The generation_started tensor tracks which prompts have started generating.
+        """
+
+    def generate_all_output_tokens_static_batch(
+        self, active_requests: OrderedDict[int, InferenceRequest],
+    ) -> OrderedDict[int, InferenceRequest]:
+        """Utility to generate all output tokens and probabilities for the prompts.
+
+        This utility generates the output tokens for a static batch. It runs forward
+        steps until all prompts complete generation, marks the requests as completed,
+        attaches the generated results, and returns the requests.
+        """
+
+    def detokenize_generations(self, prompt_tokens_with_generated_tokens: torch.Tensor) -> str:
+        """Detokenize the output generations."""
+```
+
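+For example, a controller that always decodes greedily can override `sample_from_logits` and leave the rest of the sampling loop untouched. The sketch below is hypothetical and not part of the repository; it assumes the method signature shown above (extra keyword arguments are accepted via `**kwargs`) and logits of shape `[batch, vocab]`.
+
+```python
+# Hypothetical sketch: a controller that ignores temperature/top_k/top_p and
+# always picks the highest-probability token. Not part of Megatron Core.
+from typing import Optional
+
+import torch
+
+from megatron.core.inference.sampling_params import SamplingParams
+from megatron.core.inference.text_generation_controllers.text_generation_controller import (
+    TextGenerationController,
+)
+
+
+class GreedyTextGenerationController(TextGenerationController):
+    """Same generation loop as the base class, but deterministic decoding."""
+
+    def sample_from_logits(
+        self,
+        last_token_logits: torch.Tensor,
+        sampling_params: SamplingParams,
+        vocab_size: int,
+        generation_started: Optional[torch.Tensor] = None,
+        **kwargs,
+    ) -> torch.Tensor:
+        # Clip to the tokenizer vocab size and take the arg-max token for
+        # every request in the batch, regardless of the sampling parameters.
+        return torch.argmax(last_token_logits[:, :vocab_size], dim=-1)
+```
+
+The custom controller can then be passed to the engine in place of the default `TextGenerationController` (see Step 3 of the quick start).
+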
+
+##### 3.3. Support Other Models
+Extend [abstract_model_inference_wrapper.py](./../../megatron/core/inference/model_inference_wrappers/abstract_model_inference_wrapper.py) to support other models. The abstract model wrapper implements:
+* A forward method that calls the model `forward` method according to the model parallel settings
+* Initialization that puts the model in `.eval()` mode
+* Setup of the input parameters (max batch size, max sequence length)
+
+The following methods should be implemented:
+```python
+class AbstractModelInferenceWrapper:
+    def prep_model_for_inference(self, prompts_tokens: torch.Tensor):
+        """A utility function for preparing the model for inference.
+
+        The function gets called once before the autoregressive inference loop. It puts
+        the model in eval mode and gathers some model and inference data parameters.
+        Extend this to build position ids, attention masks, etc., so that the required
+        slices can be extracted during the forward pass.
+        """
+
+    @abc.abstractclassmethod
+    def get_batch_for_context_window(self) -> List:
+        """Returns the input data for inference.
+
+        This function gets called iteratively in the inference loop. It can be used to
+        extract the relevant input from the prompt tokens, attention mask, etc.
+        required for each step of inference.
+        """
+```
+
+Refer to [gpt_inference_wrapper.py](../../megatron/core/inference/model_inference_wrappers/gpt/gpt_inference_wrapper.py) for an example of implementing this for GPTModel.
+
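+As a very rough, hypothetical skeleton, a wrapper for a new decoder-only model might look like the following. The stored attributes, the `super()` call, and the context-window slicing arguments are assumptions made for illustration; follow gpt_inference_wrapper.py for the authoritative pattern.
+
+```python
+# Hypothetical skeleton for wrapping a new decoder-only model; invented names.
+from typing import List
+
+import torch
+
+from megatron.core.inference.model_inference_wrappers.abstract_model_inference_wrapper import (
+    AbstractModelInferenceWrapper,
+)
+
+
+class MyModelInferenceWrapper(AbstractModelInferenceWrapper):
+    """Inference wrapper sketch for a hypothetical decoder-only model."""
+
+    def prep_model_for_inference(self, prompts_tokens: torch.Tensor):
+        # The base class is assumed to handle eval mode and bookkeeping.
+        super().prep_model_for_inference(prompts_tokens)
+        # Precompute everything the forward pass will slice per step.
+        self.prompts_tokens = prompts_tokens
+        batch_size, max_seq_len = prompts_tokens.shape
+        self.position_ids = (
+            torch.arange(max_seq_len, device=prompts_tokens.device)
+            .unsqueeze(0)
+            .expand(batch_size, -1)
+        )
+
+    def get_batch_for_context_window(
+        self, context_start_position: int, context_end_position: int
+    ) -> List:
+        # Slice out only the tokens and position ids needed for this step.
+        tokens = self.prompts_tokens[:, context_start_position:context_end_position]
+        position_ids = self.position_ids[:, context_start_position:context_end_position]
+        return [tokens, position_ids]
+```
+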
+
+##### 3.3. Modify Inference Parameters
+Text generation is controlled by [SamplingParams](../../megatron/core/inference/sampling_params.py). Customize it to change `top_p`, `top_k`, the number of tokens to generate, etc. Additional attributes needed by a custom inference loop can be attached as shown below.
+
+```python
+from megatron.core.inference.sampling_params import SamplingParams
+
+c = SamplingParams(temperature=0.5)
+c.add_attributes({'min_length': 4, 'eod_id': 153})
+```
+
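+The customized object is then passed to the engine's `generate()` call in place of the defaults (compare Step 4 of the quick start). For example, assuming `inference_engine` was built as shown there:
+
+```python
+from megatron.core.inference.sampling_params import SamplingParams
+
+# `inference_engine` is assumed to be the StaticInferenceEngine from Step 3.
+sampling_params = SamplingParams(temperature=0.5, top_k=2, num_tokens_to_generate=32)
+results = inference_engine.generate(
+    prompts=["Hello, my name is"], sampling_params=sampling_params
+)
+```
+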
+ +#### 4. Future work +The following features are planned for future releases. +* TRTLLM Engine support +* Continuous batching optimizations +* Speculative decoding \ No newline at end of file diff --git a/examples/inference/gpt/gpt_dynamic_inference.py b/examples/inference/gpt/gpt_dynamic_inference.py new file mode 100644 index 0000000000..069bd65966 --- /dev/null +++ b/examples/inference/gpt/gpt_dynamic_inference.py @@ -0,0 +1,507 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + +import hashlib +import json +import math +import os +import pickle +import sys +import torch +from argparse import ArgumentParser +from collections import defaultdict +from functools import partial +from tqdm import tqdm +from typing import Dict, List + +import torch +from tqdm import tqdm + +from megatron.core.inference.contexts.dynamic_context import ( + ContextOverflowError, + DynamicInferenceContext, +) +from megatron.core.inference.engines import DynamicInferenceEngine +from megatron.core.inference.model_inference_wrappers.gpt.gpt_inference_wrapper import ( + GPTInferenceWrapper, +) +from megatron.core.inference.sampling_params import SamplingParams +from megatron.core.inference.text_generation_controllers.text_generation_controller import ( + TextGenerationController, +) +from megatron.core.tokenizers.text.utils.build_tokenizer import build_tokenizer +from megatron.core.transformer.module import MegatronModule + +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)) +) +from megatron.training import get_args, get_model as _get_model, get_tokenizer, initialize_megatron +from megatron.training.checkpointing import load_checkpoint + +from megatron.core.utils import configure_nvtx_profiling +from model_provider import model_provider +from gpt_builders import gpt_builder + +import json + +from examples.inference.gpt.utils import ( + Request, + add_common_inference_args, + build_dynamic_engine_setup_prefix, + build_requests, + get_curr_time, +) +from megatron.training import get_args +from megatron.training import get_model as _get_model +from megatron.training import get_tokenizer, initialize_megatron +from megatron.training.checkpointing import load_checkpoint +from pretrain_gpt import model_provider + +import torch +import io +import megatron + +torch.serialization.add_safe_globals([io.BytesIO]) +torch.serialization.add_safe_globals([megatron.core.rerun_state_machine.RerunState]) +torch.serialization.add_safe_globals([megatron.core.rerun_state_machine.RerunDiagnostic]) + + + +def add_dynamic_inference_args(parser: ArgumentParser) -> ArgumentParser: + """Dynamic inference arguments.""" + + add_common_inference_args(parser) + + group = parser.add_argument_group(title='Dynamic inference') + group.add_argument( + "--inference-ckpt-non-strict", + action="store_true", + help="Load checkpoint with `strict=False`.", + ) + group.add_argument( + "--termination-id", type=int, default=None, + help="Termination ID that overrides `tokenizer.eod`." + ) + + return parser + + +def get_model() -> MegatronModule: + """Initialize model and load checkpoint.""" + + args = get_args() + + # Build model. + model = _get_model( + partial(model_provider, gpt_builder), + wrap_with_ddp=False + ) + + # Load checkpoint. + assert args.load is not None + args.exit_on_missing_checkpoint = True + load_checkpoint( + ddp_model=model, + optimizer=None, + opt_param_scheduler=None, + strict=not args.inference_ckpt_non_strict, + ) + + # No virtual PP. 
+ assert len(model) == 1, "Above condition should have caught this" + model = model[0] + + # Eval mode. + model.eval() + + return model + + +def get_inference_context(requests: List[Request], sampling_params: SamplingParams, + calculate_max_sequence_length_from_requests: bool =True): + """The inference context manages the KV cache and other inference state.""" + + args = get_args() + # Max sequence length. + if calculate_max_sequence_length_from_requests: + max_gen_length = sampling_params.num_tokens_to_generate + max_context_length = max(len(r.prompt_tokens) for r in requests) + max_sequence_length = max_context_length + max_gen_length + else: + max_sequence_length = args.inference_max_seq_length + + # Inference context. + context = DynamicInferenceContext( + params_dtype=args.params_dtype, + num_layers=args.num_layers, + kv_channels=args.kv_channels, + num_attention_heads=( + args.num_query_groups if args.group_query_attention else args.num_attention_heads + ), + max_sequence_length=max_sequence_length, + num_cuda_graphs=( + args.inference_dynamic_batching_num_cuda_graphs if args.enable_cuda_graph else None + ), + chunk_size_tokens=args.inference_dynamic_batching_chunk_size, + buffer_size_gb=args.inference_dynamic_batching_buffer_size_gb, + buffer_guaranteed_fraction=args.inference_dynamic_batching_buffer_guaranteed_fraction, + buffer_overflow_factor=args.inference_dynamic_batching_buffer_overflow_factor, + max_requests_override=args.inference_dynamic_batching_max_requests_override, + max_tokens_override=args.inference_dynamic_batching_max_tokens_override, + tensor_model_parallel_size=args.tensor_model_parallel_size, + materialize_only_last_token_logits=not args.return_log_probs, + cache_mla_latent=args.multi_latent_attention and args.cache_mla_latents, + kv_lora_rank=args.kv_lora_rank if args.multi_latent_attention else None, + qk_pos_emb_head_dim=args.qk_pos_emb_head_dim, + use_flashinfer_fused_rope=args.use_flashinfer_fused_rope, + ) + + return context + + +def get_inference_controller( + model: MegatronModule, context: DynamicInferenceContext +) -> TextGenerationController: + """Buid text generation controller, which manages the model inference context. + + Args: + model (MegatronModule): Megatron GPT model. + context (DynamicInferenceContext): Context for managing KV cache. + + Return: + (TextGenerationController) Inference text generation controller. + """ + + args = get_args() + if args.legacy_tokenizer: + tokenizer = get_tokenizer() + else: + tokenizer = build_tokenizer(args) + + # Wrap model in inference wrapper. + model = GPTInferenceWrapper(model, args, context) + + # Note: the following is taken from AbstractModelInferenceWrapper.prep_model_for_inference(). + from megatron.core import parallel_state + + model.model_is_pipeline_parallel = not ( + parallel_state.is_pipeline_first_stage() and parallel_state.is_pipeline_last_stage() + ) + + # Text generation controller. + controller = TextGenerationController(model, tokenizer) + + return controller + + +def run_inference( + requests: List[Request], sampling_params: SamplingParams, engine: DynamicInferenceEngine +) -> List[Dict[str, float]]: + """Add requests to engine and generate tokens. + + Args: + requests (List[Request]): Requests that are to be added and processed. + sampling_params (SamplingParams): Sampling params for the logits. + engine (DynamicInferenceEngine): Inference engine that manages generating tokens. + + Return: + A dictionary of step times with `prefill` and `decode` keys. 
+ """ + + args = get_args() + + # Initialize request arrival times. + base_arrival_time = get_curr_time() + for request in requests: + request.time_arrival = request.time_offset + base_arrival_time + + # Add and process requests. + num_requests_total = len(requests) + num_requests_added = 0 + num_requests_finished = 0 + step_id = 0 + step_times = {"prefill": [], "decode": []} + add_times = [] + output_times = [] + tbar = tqdm(total=num_requests_total) + total_output_tokens = 0 + if args.enable_cuda_graph: + cuda_graph_request_count_map = {r:0 for r in engine.context.cuda_graph_request_counts} + else: + cuda_graph_request_count_map = None + + def _add_request(): + """Add request to engine. + + *Note: Using `prompt_text` instead of `prompt_tokens` for fair comparison. + """ + nonlocal num_requests_added + _request = requests[num_requests_added] + engine.add_request( + num_requests_added, + _request.prompt_text, + sampling_params.num_tokens_to_generate, + ) + _request.time_start = get_curr_time() + _request.state = "started" + num_requests_added += 1 + tbar.update(1) + + while True: + # Add requests. + add_start = get_curr_time() + if args.incoming_requests_per_step is None: + # Add requests with 'earlier' arrival time. + while num_requests_added < num_requests_total: + if requests[num_requests_added].time_arrival > add_start: + break + _add_request() + else: + # Add deterministic number of requests (generally used for debugging). + for i in range(min( + args.incoming_requests_per_step, + num_requests_total - num_requests_added, + )): + _add_request() + add_times.append(get_curr_time() - add_start) + + # Step inference engine (i.e., generate a token for each active request). + # Before step, we haven't done the scheduling, so we cannot know the is_decode_only + result = engine.step_modern(sampling_params, verbose=True) + # After step, we lost track of last iteration's is_decode_only, so we need to get it from the engine + is_decode_only = engine.is_decode_only + step_id += 1 + + # Record cuda_graph_request_count. + cuda_graph_request_count = result["cuda_graph_request_count"] + if args.enable_cuda_graph and cuda_graph_request_count is not None: + cuda_graph_request_count_map[cuda_graph_request_count] += 1 + + # Update requests. + active_requests = result["active_requests"] + finished_requests = result["finished_requests"] + step_time = result["step_time"] + if len(active_requests) > 0 or len(finished_requests) > 0: + if is_decode_only: + step_times["decode"].append(step_time) + else: + step_times["prefill"].append(step_time) + + # Append output tokens. + output_start = get_curr_time() + for finished_request in finished_requests: + request = requests[finished_request.request_id] + request.output_tokens = finished_request.generated_tokens + total_output_tokens += len(request.output_tokens) + request.time_end = get_curr_time() + request.output_text = finished_request.generated_text + request.state = "finished" + request.request_id = finished_request.request_id + if sampling_params.return_log_probs: + request.log_probs = ( + finished_request.prompt_log_probs + finished_request.generated_log_probs + ) + num_requests_finished += 1 + output_times.append(get_curr_time() - output_start) + + # Check if all requests are finished. 
+ if not (engine.has_unfinished_requests() or num_requests_added < num_requests_total): + break + + return { + "step_times" : step_times, + "add_times" : add_times, + "output_times" : output_times, + "total_output_tokens" : total_output_tokens, + "cuda_graph_request_count_map" : cuda_graph_request_count_map, + } + + +@torch.inference_mode() +def main(): + + # Initialize Megatron. + initialize_megatron( + extra_args_provider=add_dynamic_inference_args, + args_defaults={'no_load_rng': True, 'no_load_optim': True}, + ) + + # Start Nsight profiler. + if os.environ.get("NSIGHT_PREFIX"): + torch.cuda.cudart().cudaProfilerStart() + + configure_nvtx_profiling(True) + + args = get_args() + if args.legacy_tokenizer: + tokenizer = get_tokenizer() + else: + tokenizer = build_tokenizer(args) + + # Sampling params. + sampling_params = SamplingParams( + temperature=args.temperature, + top_k=args.top_k, + top_p=args.top_p, + return_log_probs=args.return_log_probs, + num_tokens_to_generate=args.num_tokens_to_generate, + ) + + # Requests, context, conroller. + model = get_model() + requests = build_requests(args, tokenizer) + context = get_inference_context(requests, sampling_params) + controller = get_inference_controller(model, context) + + # Validate all context_length's <= max_tokens. + invalid_prompt_length_map = {} + for request_idx, request in enumerate(requests): + if len(request.prompt_tokens) > context.max_tokens: + invalid_prompt_length_map[request_idx] = len(request.prompt_tokens) + assert not invalid_prompt_length_map, ( + "request idxs with prompts longer than context.max_tokens: " + ", ".join(f"{k}({v})" for k, v in invalid_prompt_length_map.items()) + ) + + # Inference engine. + engine = DynamicInferenceEngine( + controller, + context, + termination_id=args.termination_id if args.termination_id is not None else tokenizer.eod, + enable_cuda_graph=args.enable_cuda_graph, + random_seed=args.seed, + track_paused_request_events=args.inference_dynamic_batching_track_paused_request_events, + enable_chunked_prefill=not args.disable_chunked_prefill, + ) + + setup_prefix = build_dynamic_engine_setup_prefix(args, model, context, requests) + print("~~~") + print(setup_prefix) + print("~~~") + + # Run and time test. + t = get_curr_time() + result = run_inference(requests, sampling_params, engine) + step_times = result["step_times"] + add_times = result["add_times"] + output_times = result["output_times"] + total_output_tokens = result["total_output_tokens"] + torch.cuda.synchronize() + total_time = get_curr_time() - t + + # Validate all requests finished. + for request in requests: + assert request.state == "finished", ( + f"request.state == '{request.state}' != 'finished'." + ) + + # Print unique prompts + outputs. + if torch.distributed.get_rank() == 0: + + def escape_str(s): + return s.replace("\n", "\\n") + + print("~~~~ Unique prompts + outputs. ~~~~") + + # Map requests by their prompt. + unique_prompt_map = defaultdict(list) + for request_idx, request in enumerate(requests): + unique_prompt_map[request.prompt_text].append(request_idx) + + # Print unique prompts + outputs. 
+ for unique_idx, (prompt_text, request_idxs) in enumerate(unique_prompt_map.items()): + # ---- Prompt summary line ---- + prompt_len = len(requests[request_idxs[0]].prompt_tokens) + escaped_prompt_text = escape_str(prompt_text) + print(f"{unique_idx+1}/{len(unique_prompt_map)} [n {len(request_idxs)}, l {prompt_len}] {escaped_prompt_text}") + + # ---- Group all outputs for this prompt ---- + output_map = defaultdict(list) + for idx in request_idxs: + req = requests[idx] + output_map[req.output_text].append(idx) + + # ---- Print each unique output ---- + for output_text, output_request_idxs in output_map.items(): + if output_text is not None: + o_hash = hashlib.sha256(output_text.encode()).hexdigest()[:6] + o_len = len(requests[output_request_idxs[0]].output_tokens) + escaped_output_text = escape_str(output_text) + print(f" >>>> [n {len(output_request_idxs)}, l {o_len}, hash {o_hash}] {escaped_output_text}") + else: + o_hash = "--" + o_len = 0 + escaped_output_text = "--" + print(f" >>>> [n {len(output_request_idxs)}, {o_len} tokens, hash {o_hash}] {escaped_output_text}") + + # Write results to JSON. Primarily used for functional testing. + if args.output_path: + json_results = {} + + # Write every 'n' requests, plus the final request. + for req in [ *requests[::args.output_every_n_results], requests[-1] ]: + result_dict = { + "input_prompt": req.prompt_text, + "generated_text": req.output_text, + "generated_tokens": req.output_tokens, + "latency": req.time_end - req.time_start, + "cuda_graph_request_count_map" : result["cuda_graph_request_count_map"], + "step_count" : engine.step_count, + } + if sampling_params.return_log_probs: + response_logprobs = req.log_probs + result_dict["logprobs"] = response_logprobs + json_results[req.request_id] = result_dict + with open(args.output_path, "w") as fp: + json.dump(json_results, fp, indent=1) + + # Timing results. + stats = torch.cuda.memory_stats() + throughput = total_output_tokens / total_time + print("~~~") + peak_alloc_gb = stats["allocated_bytes.all.peak"] / 1024**3 + peak_resvd_gb = stats["reserved_bytes.all.peak"] / 1024**3 + + p_times = step_times["prefill"] + d_times = step_times["decode"] + + p_total = sum(p_times) + d_total = sum(d_times) + + p_count = len(p_times) + d_count = len(d_times) + + p_mean = p_total / p_count + d_mean = d_total / d_count + + # Commented out for now as the step/add/output times are not calculated correctly. + # print( + # f"{setup_prefix} … " + # f"mem {peak_alloc_gb:.1f}/{peak_resvd_gb:.1f} GB … " + # f"total time: {step_total:.3f}s … " + # f"step time: total {step_total:.3f}s " + # f"[ p {p_total:.3f}s, d {d_total:.3f}s ], " + # f"mean [ p {p_mean:.3f}s, d {d_mean:.3f}s ], " + # f"count [ p {p_count}, d {d_count} ]." + # ) + capture_str = ( + f"{engine.capture_stats["time"]:.2f} sec" + if engine.capture_stats else + "--" + ) + print( + f"{setup_prefix} … " + f"capture {capture_str} … " + f"mem {peak_alloc_gb:.1f}/{peak_resvd_gb:.1f} GB … " + f"total time: {total_time:.3f}s … " + f"steps: {engine.step_count:d} … " + f"throughput: {throughput:.3f} tok/s" + ) + print("~~~") + + # Stop Nsight profiler. 
+ if os.environ.get("NSIGHT_PREFIX"): + torch.cuda.cudart().cudaProfilerStop() + + +if __name__ == "__main__": + main() diff --git a/examples/inference/gpt/gpt_dynamic_inference_12b.sh b/examples/inference/gpt/gpt_dynamic_inference_12b.sh new file mode 100644 index 0000000000..b469adadea --- /dev/null +++ b/examples/inference/gpt/gpt_dynamic_inference_12b.sh @@ -0,0 +1,119 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + +# Run dynamic batching inference on the 12B GPT model. + +set -u + +# Libraries. +pip install simpy +pip install sentencepiece +pip install tiktoken + +# Environment variables. +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +# Checkpoint. +: ${CHECKPOINT_DIR:?"CHECKPOINT_DIR is not set"} +: ${TOKENIZER_MODEL:?"TOKENIZER_MODEL is not set"} + +# Prompts. +: ${NUM_TOKENS_TO_PROMPT="8 32"} +: ${NUM_TOKENS_TO_GENERATE=256} +: ${INCOMING_REQUESTS_DURATION=10.} +: ${INCOMING_REQUESTS_PER_SEC=100.} + +# Dynamic context. +: ${BUFFER_SIZE_GB=50.} +: ${BUFFER_OVERFLOW_FACTOR=1.} +: ${BUFFER_GUARANTEED_FRACTION=0.05} + +# Cuda graphs. +: ${ENABLE_CUDA_GRAPHS=1} +: ${NUM_CUDA_GRAPHS=16} +: ${CUDA_GRAPH_SHARE_IO_BUFFERS=1} + +# Miscellaneous. +: ${ENGINE=dynamic} +: ${EXTRA_ARGS=""} +# NSIGHT_PREFIX=/path/to/nsight/profile + +# Arguments. +ARGS=" \ + --no-persist-layer-norm \ + --apply-layernorm-1p \ + --no-position-embedding \ + --group-query-attention \ + --num-query-groups 8 \ + --load ${CHECKPOINT_DIR} \ + --use-checkpoint-args \ + --untie-embeddings-and-output-weights \ + --disable-bias-linear \ + --use-rotary-position-embeddings \ + --position-embedding-type rope \ + --rotary-base 1000000 \ + --rotary-percent 1.0 \ + --swiglu \ + --normalization RMSNorm \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --exit-duration-in-mins 5740 \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + --num-layers 40 \ + --hidden-size 5120 \ + --ffn-hidden-size 14336 \ + --num-attention-heads 32 \ + --kv-channels 128 \ + --seq-length 1024 \ + --max-position-embeddings 1024 \ + --micro-batch-size 64 \ + --bf16 \ + --tokenizer-type TikTokenizer \ + --tiktoken-pattern v2 \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --distributed-timeout-minutes 2400 \ + --transformer-impl local \ + --use-flash-attn \ + --inference-rng-tracker \ + \ + --inference-dynamic-batching \ + --inference-dynamic-batching-buffer-size-gb ${BUFFER_SIZE_GB} \ + --inference-dynamic-batching-buffer-overflow-factor ${BUFFER_OVERFLOW_FACTOR} \ + --inference-dynamic-batching-buffer-guaranteed-fraction ${BUFFER_GUARANTEED_FRACTION} \ + \ + ${EXTRA_ARGS} \ +" + +# Cuda graphs. +if [ "${ENABLE_CUDA_GRAPHS}" = 1 ]; then + ARGS+=" \ + --enable-cuda-graph \ + --inference-dynamic-batching-num-cuda-graphs ${NUM_CUDA_GRAPHS} \ + " +fi + +# Prompts. +if [[ -v PROMPTS ]]; then + ARGS+=" \ + --prompts ${PROMPTS} \ + --num-tokens-to-generate ${NUM_TOKENS_TO_GENERATE} \ + " +else + ARGS+=" \ + --num-tokens-to-prompt ${NUM_TOKENS_TO_PROMPT} \ + --num-tokens-to-generate ${NUM_TOKENS_TO_GENERATE} \ + --incoming-requests-duration ${INCOMING_REQUESTS_DURATION} \ + --incoming-requests-per-sec ${INCOMING_REQUESTS_PER_SEC} \ + " +fi + +# Command. +CMD="python -m examples.inference.gpt.gpt_${ENGINE}_inference ${ARGS}" +if [[ -v NSIGHT_PREFIX ]]; then + CMD="nsys profile -s none -t nvtx,cuda --cudabacktrace=all --cuda-graph-trace=node --python-backtrace=cuda --wait all -o ${NSIGHT_PREFIX} --force-overwrite true --capture-range=cudaProfilerApi --capture-range-end=stop ${CMD}" +fi + +echo "~~~" +echo "CMD ... ${CMD}." 
+echo "~~~" +eval ${CMD} diff --git a/examples/inference/gpt/gpt_dynamic_inference_357m.sh b/examples/inference/gpt/gpt_dynamic_inference_357m.sh new file mode 100644 index 0000000000..bbe60614f6 --- /dev/null +++ b/examples/inference/gpt/gpt_dynamic_inference_357m.sh @@ -0,0 +1,105 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + +# Run dynamic batching inference on the 357M GPT model. + +set -u + +# Libraries. +pip install simpy +pip install sentencepiece +pip install tiktoken + +# Environment variables. +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +# Checkpoint. +: ${CHECKPOINT_DIR:?"CHECKPOINT_DIR is not set"} +: ${VOCAB_FILE:?"VOCAB_FILE is not set"} +: ${MERGE_FILE:?"MERGE_FILE is not set"} + +# Prompts. +: ${NUM_TOKENS_TO_PROMPT="8 32"} +: ${NUM_TOKENS_TO_GENERATE=256} +: ${INCOMING_REQUESTS_DURATION=10.} +: ${INCOMING_REQUESTS_PER_SEC=100.} + +# Dynamic context. +: ${BUFFER_SIZE_GB=50.} +: ${BUFFER_OVERFLOW_FACTOR=1.} +: ${BUFFER_GUARANTEED_FRACTION=0.05} + +# Cuda graphs. +: ${ENABLE_CUDA_GRAPHS=1} +: ${NUM_CUDA_GRAPHS=16} +: ${CUDA_GRAPH_SHARE_IO_BUFFERS=1} + +# Miscellaneous. +: ${ENGINE=dynamic} +: ${EXTRA_ARGS=""} +# NSIGHT_PREFIX=/path/to/nsight/profile + +# Arguments. +ARGS=" \ + --exit-on-missing-checkpoint \ + --transformer-impl local \ + --load ${CHECKPOINT_DIR} \ + --tokenizer-type GPT2BPETokenizer \ + --vocab-file ${VOCAB_FILE} \ + --merge-file ${MERGE_FILE} \ + --exit-on-missing-checkpoint \ + --max-position-embeddings 2048 \ + --seq-length 2048 \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + --num-layers 24 \ + --num-attention-heads 16 \ + --hidden-size 1024 \ + --bf16 \ + --micro-batch-size 1 \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --seed 42 \ + --use-flash-attn \ + --inference-rng-tracker \ + \ + --inference-dynamic-batching \ + --inference-dynamic-batching-buffer-size-gb ${BUFFER_SIZE_GB} \ + --inference-dynamic-batching-buffer-overflow-factor ${BUFFER_OVERFLOW_FACTOR} \ + --inference-dynamic-batching-buffer-guaranteed-fraction ${BUFFER_GUARANTEED_FRACTION} \ + \ + ${EXTRA_ARGS} \ +" + +# Cuda graphs. +if [ "${ENABLE_CUDA_GRAPHS}" = 1 ]; then + ARGS+=" \ + --enable-cuda-graph \ + --inference-dynamic-batching-num-cuda-graphs ${NUM_CUDA_GRAPHS} \ + " +fi + +# Prompts. +if [[ -v PROMPTS ]]; then + ARGS+=" \ + --prompts ${PROMPTS} \ + --num-tokens-to-generate ${NUM_TOKENS_TO_GENERATE} \ + " +else + ARGS+=" \ + --num-tokens-to-prompt ${NUM_TOKENS_TO_PROMPT} \ + --num-tokens-to-generate ${NUM_TOKENS_TO_GENERATE} \ + --incoming-requests-duration ${INCOMING_REQUESTS_DURATION} \ + --incoming-requests-per-sec ${INCOMING_REQUESTS_PER_SEC} \ + " +fi + +# Command. +CMD="python -m examples.inference.gpt.gpt_${ENGINE}_inference ${ARGS}" +if [[ -v NSIGHT_PREFIX ]]; then + CMD="nsys profile -s none -t nvtx,cuda --cudabacktrace=all --cuda-graph-trace=node --python-backtrace=cuda --wait all -o ${NSIGHT_PREFIX} --force-overwrite true --capture-range=cudaProfilerApi --capture-range-end=stop ${CMD}" +fi + +echo "~~~" +echo "CMD ... ${CMD}." 
+echo "~~~" +eval ${CMD} diff --git a/examples/inference/gpt/gpt_dynamic_inference_with_coordinator.py b/examples/inference/gpt/gpt_dynamic_inference_with_coordinator.py new file mode 100644 index 0000000000..add2ae4454 --- /dev/null +++ b/examples/inference/gpt/gpt_dynamic_inference_with_coordinator.py @@ -0,0 +1,152 @@ +from megatron.core.inference.inference_client import InferenceClient +from examples.inference.gpt.utils import add_common_inference_args +import asyncio +import torch.distributed as dist +from examples.inference.gpt.gpt_dynamic_inference import get_model, get_inference_context, get_inference_controller, add_dynamic_inference_args +from megatron.core.inference.inference_request import DynamicInferenceRequest +from megatron.training import initialize_megatron +import torch +import os +from megatron.training import get_args, get_tokenizer +from megatron.core.inference.sampling_params import SamplingParams +from examples.inference.gpt.utils import build_requests, build_dynamic_engine_setup_prefix, Request +from megatron.core.inference.engines import DynamicInferenceEngine +import time +from tqdm import tqdm +from typing import List +import json +from megatron.training.arguments import parse_args +from megatron.core import parallel_state + +async def main(engine: DynamicInferenceEngine, requests: List[Request], sampling_params: SamplingParams, port: int): + # once you call engine.start_listening_to_data_parallel_coordinator, + # the engine will start accepting requests from the data parallel coordinator. + # and processing them in an asyncio coroutine. + await engine.start_listening_to_data_parallel_coordinator(sampling_params, + inference_coordinator_port=port, + launch_inference_coordinator=True) + # if you want to use your own inference coordinator - + # 1. set launch_inference_coordinator to False + # 2. setup a router socket at tcp://MASTER_ADDR:PORT + # 3. wait for data parallel groups to establish connection (BasicInferenceCoordinator.__init__) + # 4. look at InferenceCoordinator.start() to see how we can route requests from users <-> data parallel groups + # based on headers. + # 5. look at InferenceClient to see how we create requests with headers. + if dist.get_rank() == 0: + client = InferenceClient(port) # submits requests to the inference coordinator + await client.start() + base_arrival_time = time.time_ns() / 10**9 + for request in requests: + request.time_arrival = request.time_offset + base_arrival_time + futures = [] + num_requests_total = len(requests) + num_requests_added = 0 + #tbar = tqdm(total=num_requests_total) + while True: + current_time = time.time_ns() / 10**9 + # Only add requests that have arrived at the current time. + while num_requests_added < num_requests_total and requests[num_requests_added].time_arrival <= current_time: + request = requests[num_requests_added] + # These add-request calls will queue up the request on a zmq socket and return + # instantaneously. They will return an asyncio future which can be awaited for + # request completion. + futures.append(client.add_request(request.prompt_text, + sampling_params)) + num_requests_added += 1 + #tbar.update(1) + if num_requests_added == num_requests_total: + break + # Relinquish control since there are no more requests to add at the moment. This allows the engine to run. + await asyncio.sleep(0) + # While we wait for the requests to complete, the engine runs in the background. 
+ results: List[DynamicInferenceRequest] = await asyncio.gather(*futures) + + + if dist.get_rank() == 0: + # Write results to JSON. Primarily used for functional testing. + if args.output_path: + json_results = {} + + for req in results: + result_dict = { + "input_prompt": req.prompt, + "generated_text": req.generated_text.replace("\n", "\\n"), + "generated_tokens": req.generated_tokens, + "latency": req.latency, #InferenceClient populates this field in the returned future. + } + if sampling_params.return_log_probs: + result_dict["logprobs"] = req.prompt_log_probs + req.generated_log_probs + json_results[req.request_id] = result_dict + with open(args.output_path, "w") as fp: + json.dump(json_results, fp, indent=4) + else: + print("Results:") + for req in results: + print(f"rid: {req.request_id}\nprompt: {req.prompt!r}\noutput: {req.generated_text!r}\n\n") + + # kill the engines and suspend the client + client.stop_engines() + client.stop() + + # once the stop signal eventually makes its way to each GPU, the engines will stop. + await asyncio.gather(engine.engine_loop_task) + +if __name__ == "__main__": + # enable inference mode in the very beginning as some fp-8 optimizations + # check for it. + with torch.inference_mode(): + initialize_megatron( + #parsed_args=args + extra_args_provider=add_dynamic_inference_args, + args_defaults={'no_load_rng': True, 'no_load_optim': True}, + ) + + # Start Nsight profiler. + if os.environ.get("NSIGHT_PREFIX"): + torch.cuda.cudart().cudaProfilerStart() + + args = get_args() + tokenizer = get_tokenizer() + + # Sampling params. + sampling_params = SamplingParams( + temperature=args.temperature, + top_k=args.top_k, + top_p=args.top_p, + return_log_probs=args.return_log_probs, + num_tokens_to_generate=args.num_tokens_to_generate, + ) + + # Requests, context, conroller. + model = get_model() + requests = build_requests(args, tokenizer) if dist.get_rank() == 0 else None + + + context = get_inference_context(None, + None, + calculate_max_sequence_length_from_requests=False) + + controller = get_inference_controller(model, context) + + # Inference engine. + engine = DynamicInferenceEngine( + controller, + context, + termination_id=tokenizer.eod, + enable_cuda_graph=args.enable_cuda_graph, + random_seed=args.seed, + enable_chunked_prefill=not args.disable_chunked_prefill + ) + + + if dist.get_rank() == 0: + setup_prefix = build_dynamic_engine_setup_prefix(args, model, context, requests) + print("~~~") + print(setup_prefix) + print("~~~") + + asyncio.run(main(engine, + requests, + sampling_params, + args.inference_coordinator_port)) + diff --git a/examples/inference/gpt/gpt_static_inference.py b/examples/inference/gpt/gpt_static_inference.py new file mode 100644 index 0000000000..efe7638d93 --- /dev/null +++ b/examples/inference/gpt/gpt_static_inference.py @@ -0,0 +1,284 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
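+# Static-batching inference example: builds a StaticInferenceEngine around a GPT or
+# Mamba checkpoint and generates completions for CLI, file-based, or synthetic prompts.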
+ +import os +from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import ( + InferenceWrapperConfig, +) +from model_provider import model_provider +from gpt_builders import gpt_builder +from mamba_builders import mamba_builder +import torch +import sys +import time +import warnings +from functools import partial +from argparse import Namespace + +import torch +import tqdm + +from megatron.core.inference.contexts import StaticInferenceContext +from megatron.core.inference.engines import StaticInferenceEngine +from megatron.core.inference.inference_request import InferenceRequest +from megatron.core.inference.model_inference_wrappers.gpt.gpt_inference_wrapper import ( + GPTInferenceWrapper, +) +from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import ( + InferenceWrapperConfig, +) +from megatron.core.inference.sampling_params import SamplingParams +from megatron.core.inference.text_generation_controllers.text_generation_controller import ( + TextGenerationController, +) +from megatron.core.tokenizers.text.utils.build_tokenizer import build_tokenizer +from megatron.core.transformer.module import MegatronModule +from pretrain_gpt import model_provider as gpt_model_provider +from pretrain_mamba import model_provider as mamba_model_provider + +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)) +) + +import asyncio +import json +from typing import Any, AsyncIterator, List + +from examples.inference.gpt.utils import add_common_inference_args, build_requests +from megatron.core import mpu +from megatron.training import get_args, get_model, get_tokenizer, print_rank_0 +from megatron.training.checkpointing import load_checkpoint +from megatron.training.initialize import initialize_megatron + + +def add_static_inference_args(parser): + """Static inference arguments.""" + + add_common_inference_args(parser) + + group = parser.add_argument_group(title='Static inference') + group.add_argument( + "--max-batch-size", + type=int, + default=None, + dest="max_batch_size", + help='Deprecated, use `--inference-max-requests` instead', + ) + group.add_argument("--stream", action="store_true", default=False, help="Stream output tokens") + + return parser + + +def get_inference_engine(args: Namespace, model: MegatronModule) -> StaticInferenceEngine: + """Utility to get the relevant backend for running inference + + This function will automatically choose the TRTLLMBackend when possible, and if not revert to Mcore backend if the user does not specify any backends. TRT LLM Backend is not implmented yet. + + Args: + args (Namespace): The user arguments parsed from command line + model (MegatronModule): The megatron model . 
+ + Returns: + AbstractBackend: The chosen backend + """ + if args.legacy_tokenizer: + tokenizer = get_tokenizer() + else: + tokenizer = build_tokenizer(args) + inference_wrapper_config = InferenceWrapperConfig( + hidden_size=args.hidden_size, + inference_batch_times_seqlen_threshold=args.inference_batch_times_seqlen_threshold, + fp32_residual_connection=args.fp32_residual_connection, + params_dtype=args.params_dtype, + padded_vocab_size=args.padded_vocab_size, + inference_max_requests=args.inference_max_batch_size, + inference_max_seq_length=args.inference_max_seq_length, + nccl_all_reduce_for_prefill=args.nccl_all_reduce_for_prefill, + fp8=args.fp8, + ) + + inference_context = StaticInferenceContext.from_config(inference_wrapper_config) + + inference_wrapped_model = GPTInferenceWrapper( + model, inference_wrapper_config, inference_context + ) + text_generation_controller = TextGenerationController( + inference_wrapped_model=inference_wrapped_model, tokenizer=tokenizer + ) + return StaticInferenceEngine(text_generation_controller=text_generation_controller) + + +async def generate( + inference_engine: StaticInferenceEngine, sampling_params: SamplingParams, prompts: List[str] +) -> List[InferenceRequest]: + async def collect_stream(prompt, request_id, stream_generator): + print(f"Request {request_id}: {prompt}", end="", flush=True) + prev_idx = 0 + async for output in stream_generator: + print(output.generated_text[prev_idx:], end="", flush=True) + prev_idx = len(output.generated_text) + print() + + request_ids: List[str] = [ + inference_engine.add_request(prompt=prompt, sampling_params=sampling_params, streaming=True) + for prompt in prompts + ] + stream_generators = [ + inference_engine.get_stream_generator(request_id) for request_id in request_ids + ] + + tasks = [ + asyncio.create_task(collect_stream(prompt, request_id, stream_generator)) + for (prompt, request_id, stream_generator) in zip(prompts, request_ids, stream_generators) + ] + + await inference_engine.run_engine_async() + await asyncio.gather(*tasks) + + results: List[InferenceRequest] = [ + inference_engine.scheduler.completed_request_pool[request_id] for request_id in request_ids + ] + + return results + + +@torch.inference_mode() +def main(): + """Main program.""" + + # Note: The default args passed here can be overwritten by using appropriate params (check arguments.py file) + # Micro batch size is not needed to be set by user. (It is calculated based on inference-batch-times-seqlen-threshold argument) + initialize_megatron( + extra_args_provider=add_static_inference_args, + args_defaults={ + 'no_load_rng': True, + 'no_load_optim': True, + 'micro_batch_size': 1, + 'exit_on_missing_checkpoint': True, + }, + ) + + args = get_args() + + if args.max_batch_size is not None: + warnings.warn( + f"`--max-batch-size` has been deprecated in favor of `--inference-max-requests`." 
+ ) + args.inference_max_batch_size = max(args.max_batch_size, args.inference_max_batch_size) + + # Set up model and load checkpoint + if args.model_provider == "gpt": + model_builder = gpt_builder + elif args.model_provider == "mamba": + model_builder = mamba_builder + else: + raise ValueError(f"Invalid model provider {args.model_provider}") + model = get_model(partial(model_provider, model_builder), wrap_with_ddp=False) + load_checkpoint(model, None, None, strict=False) + model = model[0] + + inference_engine = get_inference_engine(args, model) + + sampling_params = SamplingParams( + temperature=args.temperature, + top_k=args.top_k, + top_p=args.top_p, + return_log_probs=args.return_log_probs, + num_tokens_to_generate=args.num_tokens_to_generate, + top_n_logprobs=args.top_n_logprobs, + ) + + if args.legacy_tokenizer: + tokenizer = get_tokenizer() + else: + tokenizer = build_tokenizer(args) + requests = build_requests(args, tokenizer) + prompts = [r.prompt_text for r in requests] + + if args.enable_cuda_graph: + print(f"Running warmup for CUDA graphs...") + inference_engine.generate( + prompts=["warmup"], sampling_params=SamplingParams(num_tokens_to_generate=10) + ) + start_time = time.perf_counter() + if args.stream: + results: List[InferenceRequest] = asyncio.run( + generate(inference_engine, sampling_params, prompts) + ) + else: + results: List[InferenceRequest] = inference_engine.generate( + prompts=prompts, sampling_params=sampling_params + ) + end_time = time.perf_counter() + latency = end_time - start_time + + if torch.distributed.get_rank() == 0 and args.output_path: + results_output = {} + for idx, result in enumerate(results): + result_dict = { + 'input_prompt': result.prompt, + 'generated_text': result.generated_text, + 'generated_tokens': result.generated_tokens.tolist(), + 'tpot': result.tpot, + 'latency': latency, + } + if sampling_params.top_n_logprobs > 0: + result_dict['generated_top_n_logprobs'] = result.generated_top_n_logprobs + if sampling_params.return_log_probs: + response_logprobs = result.prompt_log_probs + result.generated_log_probs + result_dict["logprobs"] = response_logprobs + results_output[result.request_id] = result_dict + + with open(args.output_path, 'w') as f: + json.dump(results_output, f) + + # Print unique prompts + outputs. + if torch.distributed.get_rank() == 0: + + print("~~~~ Unique prompts + outputs. ~~~~") + + # Map results by their prompt. + from collections import defaultdict + + unique_prompt_map = defaultdict(list) + for result_idx, result in enumerate(results): + unique_prompt_map[result.prompt].append(result_idx) + + # Print unique prompts + outputs. + for unique_idx, (prompt_text, result_idxs) in enumerate(unique_prompt_map.items()): + result_idx = result_idxs[0] + result = results[result_idx] + generated_text = result.generated_text.replace("\n", "\\n") + print( + f"{unique_idx}/{len(unique_prompt_map)} [{len(result_idxs)}]. {prompt_text} " + f"... {generated_text}" + ) + + stats = torch.cuda.memory_stats() + print_rank_0( + "static | cg %d | %s | reqs %d [ batch %d ] ... mem %.1f/%.1f ... time %.3f." 
+ % ( + args.enable_cuda_graph, + ( + f"" + if args.prompts + else " %s, %d, %.1e, %.1e" + % ( + "(%s)" % " ".join(map(str, args.num_tokens_to_prompt)), + args.num_tokens_to_generate, + args.incoming_requests_duration, + args.incoming_requests_per_sec, + ) + ), + len(requests), + args.inference_max_batch_size, + stats["allocated_bytes.all.peak"] / (1024**3), + stats["reserved_bytes.all.peak"] / (1024**3), + latency, + ) + ) + + torch.distributed.destroy_process_group() + + +if __name__ == "__main__": + main() diff --git a/examples/inference/gpt/utils.py b/examples/inference/gpt/utils.py new file mode 100644 index 0000000000..89c51ef882 --- /dev/null +++ b/examples/inference/gpt/utils.py @@ -0,0 +1,377 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + +import json +import itertools +import random +import time +import torch +from argparse import ArgumentParser, Namespace +from tqdm import tqdm +from typing import Any, List, Optional + +from megatron.core.inference.inference_request import DynamicInferenceRequest +from megatron.core.inference.contexts import DynamicInferenceContext +from megatron.core.transformer.module import MegatronModule + + + +def add_common_inference_args(parser: ArgumentParser) -> ArgumentParser: + """Common inference arguments.""" + + group = parser.add_argument_group(title='Common inference') + + group.add_argument("--temperature", type=float, default=1.0, help='Sampling temperature.') + group.add_argument("--top_k", type=int, default=1, help='Top k sampling.') + group.add_argument("--top_p", type=float, default=0.0, help='Top p sampling.') + group.add_argument( + "--return-log-probs", + action='store_true', + default=False, + help='Return the log probabilities of the final output tokens', + ) + group.add_argument( + "--prompts", + metavar='N', + type=str, + nargs='+', + help='Input prompts with each prompt within quotes and seperated by space', + ) + group.add_argument( + "--num-tokens-to-prompt", + type=int, + nargs="+", + default=[64, 1024], + help='Number of tokens to use for simulated prompts. This should be a ' + 'space-separated pair of integers, and the generated prompt lengths will ' + 'be uniformly sampled within this range.', + ) + group.add_argument( + "--num-tokens-to-generate", + type=int, + default=30, + help='Number of tokens to generate for each prompt', + ) + group.add_argument( + "--top-n-logprobs", + type=int, + default=0, + help='Return the top n logprobs for the generated tokens and their corresponding token as a dictionary', + ) + group.add_argument( + "--incoming-requests-per-step", + type=int, default=None, + help="Add a deterministic number of requests per step. This arg is " + "prioritized over `--incoming-requests-per-sec` below (which is non-" + "deterministic). Note that the number of requests added per step is " + "additionally limited by the inference context's `max_requests`, " + "`max_tokens`, and KV buffer size.", + ) + group.add_argument( + "--incoming-requests-per-sec", + type=float, + default=100.0, + help="Simulated number of requests per second. Set to -1 to add all requests together.", + ) + group.add_argument( + "--incoming-requests-duration", + type=float, + default=10.0, + help="Total amount of time to simulate that requests are " + "arriving. Multiply this value with " + "`--incoming-requests-per-sec` to get the approximate " + "total number of requests. 
Set to -1 to add all requests together.", + ) + group.add_argument( + "--model-provider", + choices=["mamba", "gpt"], + default="gpt", + help="Model provider", + ) + group.add_argument( + "--output-path", + type=str, + default=None, + help="Path to save generations as JSON", + ) + group.add_argument( + "--output-every-n-results", + type=int, + default=1, + help="To minimize the output file size of larger runs, only write the " + "results of every `n` requests.", + ) + group.add_argument( + "--prompt-file", + help='Jsonl file containing input prompts, where each item (i.e., line) ' + 'contains the field \'text\' where the value is the prompt. All other ' + 'fields within each item are ignored, and may be customized for each ' + 'application.', + ) + group.add_argument( + "--prompt-file-num-truncate", + type=int, + help='Number of samples to use from the loaded prompt file (see ' + '`--prompt-file` above). The first `--prompt-file-num-truncate` samples ' + 'will be used, in order.', + ) + group.add_argument( + "--inference-coordinator-port", + type=int, + help="This port will be used to setup the inference co-ordinator on node-0", + default=12346 + ) + group.add_argument( + "--use-flashinfer-fused-rope", + action='store_true', + default=False, + help='Use flashinfer fused rope implementation.', + ) + + return parser + + +def get_curr_time() -> float: + """Get synchronized time across ranks.""" + curr_time = torch.cuda.LongTensor([time.time_ns()]) + if torch.distributed.is_initialized(): + torch.distributed.broadcast(curr_time, src=0) + return curr_time.item() / 10**9 + + +class Request: + """Class to hold attributes for a single request. + + A request is initialized with its prompt text. As it is added, processed, + and completed through the inference engine, the request is populated with its + start time, end time, and output tokens. + + Args: + prompt_text (str): Prompt text. + time_offset (float): Artificial time offset for simulating incoming + requests. This value is later added to the `base_arrival_time` to + simulate the requests arrival time. + tokenizer (Any): Tokenizer for tokenizing the prompt. + """ + + def __init__(self, prompt_text: str, time_offset: float, tokenizer: Any): + self.prompt_text = prompt_text + self.prompt_tokens = tokenizer.tokenize(prompt_text) + self.output_text = None + self.output_tokens = [] + self.time_offset = time_offset + self.time_arrival = None + self.time_start = None + self.time_end = None + self.state = "not-started" + + def __str__(self) -> str: + return "state '%s'; toffset %.1e; prompt len %d; output len %d; '%s'" % ( + self.state, + self.time_offset, + len(self.prompt_tokens), + len(self.output_tokens), + self.prompt_text, + ) + + +def get_time_offsets( + seed: int | None, + incoming_requests_per_step: int, + incoming_requests_per_sec: float, + num_requests: int, +) -> list[float]: + """Get example time offsets.""" + + # Time offsets to add all requests at once. + if incoming_requests_per_step is not None or incoming_requests_per_sec <= 0: + return [-1] * num_requests + + # if num_requests is not None: + incoming_requests_duration = num_requests / incoming_requests_per_sec + incoming_requests_duration *= 2 # extra margin, to accomodate time sampling + + random.seed(seed) + + import simpy # Guard against this import in test case + + # Generate random time offsets. 
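+    # Inter-arrival times are sampled from an exponential distribution with rate
+    # `incoming_requests_per_sec`, so the simulated arrivals form a Poisson process;
+    # simpy advances virtual time until `incoming_requests_duration` has elapsed.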
+ def arrival(r): + while True: + yield env.timeout(random.expovariate(r)) + time_offsets.append(env.now) + + time_offsets = [] + env = simpy.Environment() + env.process(arrival(incoming_requests_per_sec)) + env.run(incoming_requests_duration) + + # Ensure at least a single request. + if len(time_offsets) == 0: + time_offsets = [0.0] + + # Truncate to num_requests. + assert len(time_offsets) >= num_requests + time_offsets = time_offsets[:num_requests] + + return time_offsets + + +def get_cli_requests(args: Namespace, tokenizer: Any) -> list[Request]: + + # Get time offsets. + time_offsets = get_time_offsets( + args.seed, + args.incoming_requests_per_step, + args.incoming_requests_per_sec, + len(args.prompts), + ) + + # Init requests. + requests = [Request(p, t, tokenizer) for p,t in zip(args.prompts, time_offsets)] + return requests + + +def get_synthetic_requests(args: Namespace, tokenizer: Any) -> list[Request]: + """Get example requests.""" + + # Get time offsets. + time_offsets = get_time_offsets( + args.seed, + args.incoming_requests_per_step, + args.incoming_requests_per_sec, + int(args.incoming_requests_per_sec * args.incoming_requests_duration), + ) + + # Init requests. + requests = [ + Request("hi " * random.randint(*args.num_tokens_to_prompt), t, tokenizer) + for t in time_offsets + ] + + return requests + + +def get_requests_from_file(args: Namespace, tokenizer: Any) -> list[Request]: + """Get requests from a file.""" + if not args.prompt_file: + raise ValueError("Prompt file is required to read requests from a file.") + + # Load prompts. + n_prompts = sum(1 for _ in open(args.prompt_file)) + prompts = [] + with open(args.prompt_file) as f: + for line in tqdm(f.readlines(), "read prompt file", total=n_prompts): + prompts.append(json.loads(line)["text"]) + if len(prompts) == args.prompt_file_num_truncate: + break + + # Get time offsets. + time_offsets: list[float] = get_time_offsets( + args.seed, + args.incoming_requests_per_step, + args.incoming_requests_per_sec, + len(prompts), + ) + + # Init requests. + requests = [ + Request(p, t, tokenizer) + for p, t in tqdm(zip(prompts, time_offsets), "init requests", total=len(prompts)) + ] + + return requests + + +def build_requests(args: Namespace, tokenizer: Any) -> list[Request]: + # Check if we have any prompts (from command line or JSONL) + if args.prompts: + if args.prompt_file: + raise ValueError("Cannot use both --prompts and --prompt-file") + return get_cli_requests(args, tokenizer) + elif args.prompt_file: + return get_requests_from_file(args, tokenizer) + else: + return get_synthetic_requests(args, tokenizer) + + +def get_model_size_str(model): + n = sum(p.numel() for p in model.parameters()) + for exp, suffix in ((12, "t"), (9, "b"), (6, "m"), (3, "k"), (0, "")): + nquery = int(10**exp) + if n > nquery: + return "%d%s" % (n // nquery, suffix) + raise Exception("something went wrong.") + + +def build_dynamic_engine_setup_prefix( + args: Namespace, + model: MegatronModule, + context: DynamicInferenceContext, + requests: list[DynamicInferenceRequest], +): + """ + Returns a compact, pipe-separated summary of the dynamic-batching setup. + + Example output: + + `dynamic | cg True | prompts: synth(16 256), n 1024, g 512, t 1.0e+02 5.0e-01 | bf 4, 1.2 [r 1024, t 8192] | gtd 0.50 [r 512] | reqs 100` # pylint: disable=line-too-long + + Args: + args (Namespace): Command-line arguments for this run. + context (DynamicInferenceContext): Stores limits such as `max_requests`, + `max_tokens`, and `gtd_request_count`. 
+ requests (List[DynamicInferenceRequest]): List of inference requests. + + Returns: + A configuration string for logging. + """ + # CUDA graph config + if args.enable_cuda_graph: + cg_str = ( + f"graphs {context.cuda_graph_token_counts[0]}:" + f"{context.cuda_graph_token_counts[-1]}" + ) + else: + cg_str = "--" + + # Prompt description + prompt_src_str = ( + "cli" if args.prompts else + "file" if args.prompt_file else + f"synth({', '.join(map(str, args.num_tokens_to_prompt))})" + ) + request_str = ( + f"requests: {prompt_src_str}, " + f"n {len(requests):d}, g {args.num_tokens_to_generate:d}, " + ) + request_str += ( + f"dur {args.incoming_requests_duration:.1e} " + f"r/sec {args.incoming_requests_per_sec:.1e}" + if args.incoming_requests_per_step is None else + f"r/step {args.incoming_requests_per_step}" + ) + + # Buffer limits config + flw = args.inference_dynamic_batching_buffer_overflow_factor + flw_str = "no overflow" if flw is None else f"{flw:.1f}" + buffer_limits_str = ( + f"bf {args.inference_dynamic_batching_buffer_size_gb:.0f}, {flw_str} " + f"[r {context.max_requests}, t {context.max_tokens}]" + ) + + # Guaranteed request config + guaranteed_fraction_str = ( + f"gtd {args.inference_dynamic_batching_buffer_guaranteed_fraction:.2f} " + f"[r {context.gtd_request_count}]" + ) + + parts = [ + get_model_size_str(model), + "dynamic", + cg_str, + request_str, + buffer_limits_str, + guaranteed_fraction_str, + ] + + return " | ".join(parts) diff --git a/examples/inference/llama_mistral/huggingface_reference.py b/examples/inference/llama_mistral/huggingface_reference.py new file mode 100644 index 0000000000..9d8f4465f6 --- /dev/null +++ b/examples/inference/llama_mistral/huggingface_reference.py @@ -0,0 +1,25 @@ +import argparse +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer + +# Set up argument parsing +parser = argparse.ArgumentParser(description="Script for text generation with a specific model and prompt.") +parser.add_argument('--prompt', type=str, required=True, help="Prompt text to use for text generation") +parser.add_argument('--model-path', type=str, required=True, help="Path to the Huggingface model checkpoint") + +# Parse command-line arguments +args = parser.parse_args() + +model_path = args.model_path +prompt = args.prompt + +config = AutoConfig.from_pretrained(model_path) +tokenizer = AutoTokenizer.from_pretrained(model_path, config=config) +model = AutoModelForCausalLM.from_pretrained(model_path, config=config).cuda() + +inputs = tokenizer(prompt, return_tensors="pt") +for key in inputs: + inputs[key] = inputs[key].cuda() +# top_k, top_p and do_sample are set for greedy argmax based sampling + +outputs = model.generate(**inputs, max_length=100, do_sample=False, top_p=0, top_k=0, temperature=1.0) +print(tokenizer.decode(outputs[0], skip_special_tokens=True)) \ No newline at end of file diff --git a/examples/inference/llama_mistral/run_static_inference_llama4_scout.sh b/examples/inference/llama_mistral/run_static_inference_llama4_scout.sh new file mode 100755 index 0000000000..cc8cfac5e6 --- /dev/null +++ b/examples/inference/llama_mistral/run_static_inference_llama4_scout.sh @@ -0,0 +1,68 @@ +#!/bin/bash +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NVTE_APPLY_QK_LAYER_SCALING=0 + +DISTRIBUTED_ARGS="--nproc_per_node 8 \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr 0.0.0.0 \ + --master_port 6000" + +# Fill in checkpoint path to Llama 4 Scout to run +CHECKPOINT= +PROMPTS="What is the capital of France?" 
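+# The underlying --prompts argument accepts multiple prompts, each within quotes
+# and separated by spaces.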
+TOKENS_TO_GENERATE=4 +MAX_BATCH_SIZE=2 + +MODEL_ARGS=" \ + --micro-batch-size 1 \ + --bf16 \ + --no-masked-softmax-fusion \ + --disable-bias-linear \ + --untie-embeddings-and-output-weights \ + --position-embedding-type rope \ + --no-rope-fusion \ + --normalization RMSNorm \ + --swiglu \ + --num-layers 48 \ + --hidden-size 5120 \ + --ffn-hidden-size 16384 \ + --num-attention-heads 40 \ + --group-query-attention \ + --num-query-groups 8 \ + --qk-layernorm \ + --num-experts 16 \ + --moe-ffn-hidden-size 8192 \ + --moe-router-score-function sigmoid \ + --moe-router-topk 1 \ + --moe-router-topk-scaling-factor 1.0 \ + --moe-shared-expert-intermediate-size 8192 \ + --moe-aux-loss-coeff 1e-3 \ + --moe-token-dispatcher-type alltoall \ + --moe-token-drop-policy probs \ + --moe-router-load-balancing-type seq_aux_loss \ + --seq-length 4096 \ + --max-position-embeddings 4096 \ + --tokenizer-type HuggingFaceTokenizer \ + --make-vocab-size-divisible-by 128 \ + --use-mcore-models \ + --rotary-interleaved \ + --rotary-percent 1.0 \ + --rotary-base 500000 \ + --rope-scaling-factor 8.0 \ + --use-rope-scaling \ + --no-bias-swiglu-fusion \ + --qk-l2-norm \ + --moe-apply-probs-on-input \ + --moe-router-dtype fp64 \ +" + +torchrun $DISTRIBUTED_ARGS -m examples.inference.gpt.gpt_static_inference \ + --load ${CHECKPOINT} \ + --tokenizer-model unsloth/Llama-4-Scout-17B-16E-Instruct \ + --dist-ckpt-strictness log_unexpected \ + --tensor-model-parallel-size 8 \ + --prompts ${PROMPTS} \ + --num-tokens-to-generate ${TOKENS_TO_GENERATE} \ + --max-batch-size ${MAX_BATCH_SIZE} \ + ${MODEL_ARGS} diff --git a/examples/inference/llama_mistral/run_text_generation_llama3.1.sh b/examples/inference/llama_mistral/run_text_generation_llama3.1.sh new file mode 100755 index 0000000000..06584f0917 --- /dev/null +++ b/examples/inference/llama_mistral/run_text_generation_llama3.1.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# This example will start serving the Llama3.1-8B model +export NCCL_IB_SL=1 +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NVTE_APPLY_QK_LAYER_SCALING=0 + +DISTRIBUTED_ARGS="--nproc_per_node 1 \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr 0.0.0.0 \ + --master_port 6000" + +# Ensure CHECKPOINT and TOKENIZER_MODEL are provided +if [ -z "$1" ] || [ -z "$2" ]; then + echo "Error: You must provide CHECKPOINT and TOKENIZER_MODEL as command-line arguments." 
+ echo "Usage: $0 /path/to/checkpoint /path/to/tokenizer_model" + exit 1 +fi + +# Assign command-line arguments to variables +CHECKPOINT=$1 +TOKENIZER_MODEL=$2 + +pip install flask-restful + +torchrun $DISTRIBUTED_ARGS tools/run_text_generation_server.py \ + --use-checkpoint-args \ + --disable-bias-linear \ + --tokenizer-type HuggingFaceTokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --transformer-impl transformer_engine \ + --normalization RMSNorm \ + --group-query-attention \ + --num-query-groups 8 \ + --no-masked-softmax-fusion \ + --attention-softmax-in-fp32 \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --untie-embeddings-and-output-weights \ + --position-embedding-type rope \ + --rotary-percent 1.0 \ + --rotary-base 500000 \ + --use-rope-scaling \ + --use-rotary-position-embeddings \ + --swiglu \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + --num-layers 32 \ + --hidden-size 4096 \ + --ffn-hidden-size 14336 \ + --load ${CHECKPOINT} \ + --num-attention-heads 32 \ + --max-position-embeddings 131072 \ + --bf16 \ + --micro-batch-size 1 \ + --seq-length 8192 diff --git a/examples/inference/llama_mistral/run_text_generation_llama3.sh b/examples/inference/llama_mistral/run_text_generation_llama3.sh new file mode 100755 index 0000000000..c5fc4103ab --- /dev/null +++ b/examples/inference/llama_mistral/run_text_generation_llama3.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# This example will start serving the Llama3-8B model +export NCCL_IB_SL=1 +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NVTE_APPLY_QK_LAYER_SCALING=0 + +DISTRIBUTED_ARGS="--nproc_per_node 1 \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr 0.0.0.0 \ + --master_port 6000" + +# Ensure CHECKPOINT and TOKENIZER_MODEL are provided +if [ -z "$1" ] || [ -z "$2" ]; then + echo "Error: You must provide CHECKPOINT and TOKENIZER_MODEL as command-line arguments." 
+ echo "Usage: $0 /path/to/checkpoint /path/to/tokenizer_model" + exit 1 +fi + +# Assign command-line arguments to variables +CHECKPOINT=$1 +TOKENIZER_MODEL=$2 + +pip install flask-restful + +torchrun $DISTRIBUTED_ARGS tools/run_text_generation_server.py \ + --use-checkpoint-args \ + --disable-bias-linear \ + --tokenizer-type HuggingFaceTokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --transformer-impl transformer_engine \ + --normalization RMSNorm \ + --group-query-attention \ + --num-query-groups 8 \ + --no-masked-softmax-fusion \ + --attention-softmax-in-fp32 \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --untie-embeddings-and-output-weights \ + --position-embedding-type rope \ + --rotary-percent 1.0 \ + --rotary-base 500000 \ + --use-rotary-position-embeddings \ + --swiglu \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + --num-layers 32 \ + --hidden-size 4096 \ + --ffn-hidden-size 14336 \ + --load ${CHECKPOINT} \ + --num-attention-heads 32 \ + --max-position-embeddings 8192 \ + --bf16 \ + --micro-batch-size 1 \ + --seq-length 8192 diff --git a/examples/inference/llama_mistral/run_text_generation_mistral.sh b/examples/inference/llama_mistral/run_text_generation_mistral.sh new file mode 100755 index 0000000000..4358fd494c --- /dev/null +++ b/examples/inference/llama_mistral/run_text_generation_mistral.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# This example will start serving the Mistral-7B-v0.3 model +export NCCL_IB_SL=1 +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +DISTRIBUTED_ARGS="--nproc_per_node 1 \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr 0.0.0.0 \ + --master_port 6000" + +# Ensure CHECKPOINT and TOKENIZER_MODEL are provided +if [ -z "$1" ] || [ -z "$2" ]; then + echo "Error: You must provide CHECKPOINT and TOKENIZER_MODEL as command-line arguments." 
+ echo "Usage: $0 /path/to/checkpoint /path/to/tokenizer_model" + exit 1 +fi + +# Assign command-line arguments to variables +CHECKPOINT=$1 +TOKENIZER_MODEL=$2 + +pip install flask-restful + +torchrun $DISTRIBUTED_ARGS tools/run_text_generation_server.py \ + --tokenizer-type HuggingFaceTokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --use-checkpoint-args \ + --apply-layernorm-1p \ + --transformer-impl transformer_engine \ + --normalization RMSNorm \ + --group-query-attention \ + --num-query-groups 8 \ + --no-masked-softmax-fusion \ + --use-flash-attn \ + --untie-embeddings-and-output-weights \ + --disable-bias-linear \ + --position-embedding-type rope \ + --rotary-percent 1.0 \ + --rotary-base 1000000 \ + --swiglu \ + --ffn-hidden-size 14336 \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + --num-layers 32 \ + --hidden-size 4096 \ + --load ${CHECKPOINT} \ + --num-attention-heads 32 \ + --max-position-embeddings 4096 \ + --bf16 \ + --micro-batch-size 1 \ + --seq-length 4096 \ + --seed 101 diff --git a/examples/run_text_generation_server_345M.sh b/examples/inference/run_text_generation_server_345M.sh similarity index 92% rename from examples/run_text_generation_server_345M.sh rename to examples/inference/run_text_generation_server_345M.sh index a151b98467..e8e61adb16 100755 --- a/examples/run_text_generation_server_345M.sh +++ b/examples/inference/run_text_generation_server_345M.sh @@ -26,9 +26,6 @@ torchrun $DISTRIBUTED_ARGS tools/run_text_generation_server.py \ --fp16 \ --micro-batch-size 1 \ --seq-length 1024 \ - --out-seq-length 1024 \ - --temperature 1.0 \ --vocab-file $VOCAB_FILE \ --merge-file $MERGE_FILE \ - --top_p 0.9 \ --seed 42 diff --git a/examples/run_text_generation_server_345M_8_tensor_parallel.sh b/examples/inference/run_text_generation_server_345M_8_tensor_parallel.sh similarity index 92% rename from examples/run_text_generation_server_345M_8_tensor_parallel.sh rename to examples/inference/run_text_generation_server_345M_8_tensor_parallel.sh index 027ab42172..368cec3b31 100755 --- a/examples/run_text_generation_server_345M_8_tensor_parallel.sh +++ b/examples/inference/run_text_generation_server_345M_8_tensor_parallel.sh @@ -24,9 +24,6 @@ python -m torch.distributed.launch $DISTRIBUTED_ARGS tools/run_text_generation_s --fp16 \ --micro-batch-size 1 \ --seq-length 1024 \ - --out-seq-length 1024 \ - --temperature 1.0 \ --vocab-file $VOCAB_FILE \ --merge-file $MERGE_FILE \ - --top_p 0.9 \ --seed 42 diff --git a/examples/inference/t5/simple_t5_batch_inference.py b/examples/inference/t5/simple_t5_batch_inference.py new file mode 100644 index 0000000000..4b15952e07 --- /dev/null +++ b/examples/inference/t5/simple_t5_batch_inference.py @@ -0,0 +1,163 @@ +import os +import sys +from argparse import Namespace + +import torch + +import pretrain_t5 +from megatron.core.inference.engines import AbstractEngine, StaticInferenceEngine +from megatron.core.inference.inference_request import InferenceRequest +from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import ( + InferenceWrapperConfig, +) +from megatron.core.inference.model_inference_wrappers.t5.t5_inference_wrapper import ( + T5InferenceWrapper, +) +from megatron.core.inference.sampling_params import SamplingParams +from megatron.core.inference.text_generation_controllers.encoder_decoder_text_generation_controller import ( + EncoderDecoderTextGenerationController, +) +from megatron.core.tokenizers.text.utils.build_tokenizer import build_tokenizer +from 
megatron.core.transformer.module import MegatronModule +from pretrain_t5 import model_provider + +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)) +) + +from typing import List + +from megatron.core import mpu +from megatron.training import get_args, get_model, get_tokenizer +from megatron.training.checkpointing import load_checkpoint +from megatron.training.initialize import initialize_megatron + + +def add_text_generate_args(parser): + """Text generation arguments.""" + group = parser.add_argument_group(title='text generation') + + group.add_argument("--temperature", type=float, default=1.0, help='Sampling temperature.') + group.add_argument("--top_k", type=int, default=1, help='Top k sampling.') + group.add_argument("--top_p", type=float, default=0.0, help='Top p sampling.') + group.add_argument( + "--return-log-probs", + action='store_true', + default=False, + help='Return the log probabilities of the final output tokens', + ) + group.add_argument( + "--num-tokens-to-generate", + type=int, + default=30, + help='Number of tokens to generate for each prompt', + ) + group.add_argument( + "--encoder-prompts", + metavar='N', + type=str, + nargs='+', + help='Encoder input prompts with each prompt within quotes and seperated by space', + ) + group.add_argument( + "--max-batch-size", type=int, default=1, help='Max number of prompts to process at once' + ) + return parser + + +def get_inference_engine(args: Namespace, model: MegatronModule) -> AbstractEngine: + """Utility to get the relevant backend for running inference + + This function will automatically chose the TRTLLMBackend when possible, and if not revert to Mcore backend if the user does not specify any backends. TRT LLM Backend is not implmented yet. + + Args: + args (Namespace): The user arguments parsed from command line + model (MegatronModule): The megatron model . + + Returns: + AbstractBackend: The chosen backend + """ + if args.legacy_tokenizer: + tokenizer = get_tokenizer() + else: + tokenizer = build_tokenizer(args) + + inference_wrapper_config = InferenceWrapperConfig( + hidden_size=args.hidden_size, + inference_batch_times_seqlen_threshold=args.inference_batch_times_seqlen_threshold, + fp32_residual_connection=args.fp32_residual_connection, + params_dtype=args.params_dtype, + padded_vocab_size=args.padded_vocab_size, + ) + + inference_wrapped_model = T5InferenceWrapper(model, inference_wrapper_config) + text_generation_controller = EncoderDecoderTextGenerationController( + inference_wrapped_model=inference_wrapped_model, tokenizer=tokenizer + ) + return StaticInferenceEngine( + text_generation_controller=text_generation_controller, max_batch_size=args.max_batch_size + ) + + +def main(): + """Main program.""" + + # Note: The default args passed here can be overwritten by using appropriate params (check arguments.py file) + # Micro batch size is not needed to be set by user. 
(It is calculated based on inference-batch-times-seqlen-threshold argument) + initialize_megatron( + extra_args_provider=add_text_generate_args, + args_defaults={ + 'no_load_rng': True, + 'no_load_optim': True, + 'micro_batch_size': 1, + 'exit_on_missing_checkpoint': True, + }, + ) + + # Set up model and load checkpoint + model = get_model(model_provider, wrap_with_ddp=False) + load_checkpoint(model, None, None) + model = model[0] + + args = get_args() + + inference_engine = get_inference_engine(args, model) + + sampling_params = SamplingParams( + temperature=args.temperature, + top_k=args.top_k, + top_p=args.top_p, + return_log_probs=args.return_log_probs, + num_tokens_to_generate=args.num_tokens_to_generate, + ) + + if args.legacy_tokenizer: + tokenizer = get_tokenizer() + else: + tokenizer = build_tokenizer(args) + decoder_prompts = [""] * len( + args.encoder_prompts + ) # for T5, the prompt is provided as encoder input, hence decoder_prompts is empty + args.prompts = decoder_prompts + + results: List[InferenceRequest] = inference_engine.generate( + prompts=args.prompts, + add_BOS=True, + encoder_prompts=args.encoder_prompts, + sampling_params=sampling_params, + ) + + if torch.distributed.get_rank() == 0: + for idx, result in enumerate(results): + print(f' \n------------- RESULT FOR PROMPT {idx} --------------- ') + result = { + 'id': result.request_id, + 'input_prompt': result.prompt, + 'generated_text': result.generated_text, + 'generated_tokens': result.generated_tokens, + } + print(result) + + +if __name__ == "__main__": + main() diff --git a/examples/llama/README.md b/examples/llama/README.md new file mode 100644 index 0000000000..2adb591b52 --- /dev/null +++ b/examples/llama/README.md @@ -0,0 +1,144 @@ +# Llama Models + +## Table of contents +- [1. Overview](#1-overview) +- [2. Prerequisites](#2-prerequisites) +- [3. Training Setup](#3-training-setup) +- [4. Configuration](#4-configuration) +- [5. Test Datasets](#5-test-datasets) +- [6. FP8 Debugging](#6-fp8-debugging) + +## 1. Overview + + +Train Llama models using FP8 precision with Megatron-Core. + +## 2. Prerequisites + + +```bash +# Clone repository +export HOST_MEGATRON_LM_DIR="/path/to/your/host/megatron-lm" +git clone https://github.com/NVIDIA/Megatron-LM.git "$HOST_MEGATRON_LM_DIR" +cd "$HOST_MEGATRON_LM_DIR" +git checkout "core_r0.12.0" + +# Set paths +export HOST_CHECKPOINT_PATH="./checkpoints/llama3_8b_fp8" +export HOST_TENSORBOARD_LOGS_PATH="./tensorboard_logs/llama3_8b_fp8" + +# Optional: For real data +# export HOST_TOKENIZER_MODEL_PATH="/path/to/host/tokenizer.model" +# export HOST_DATA_PREFIX="/path/to/host/mydata_prefix" +``` + +## 3. 
Training Setup + + +### Using Mock Data +```bash +PYTORCH_IMAGE="nvcr.io/nvidia/pytorch:25.03-py3" + +docker run --rm --gpus all --ipc=host --ulimit memlock=-1 \ + -v "${HOST_MEGATRON_LM_DIR}:/workspace/megatron-lm" \ + -v "${HOST_CHECKPOINT_PATH}:/workspace/checkpoints" \ + -v "${HOST_TENSORBOARD_LOGS_PATH}:/workspace/tensorboard_logs" \ + --workdir /workspace/megatron-lm \ + $PYTORCH_IMAGE \ + bash examples/llama/train_llama3_8b_h100_fp8.sh \ + /workspace/checkpoints \ + /workspace/tensorboard_logs \ + 2>&1 | tee "${HOST_TENSORBOARD_LOGS_PATH}/training_mock_$(date +'%y-%m-%d_%H-%M-%S').log" +``` + +### Using Custom Data and Tokenizer +```bash +PYTORCH_IMAGE="nvcr.io/nvidia/pytorch:25.03-py3" + +docker run --rm --gpus all --ipc=host --ulimit memlock=-1 \ + -v "${HOST_MEGATRON_LM_DIR}:/workspace/megatron-lm" \ + -v "${HOST_CHECKPOINT_PATH}:/workspace/checkpoints" \ + -v "${HOST_TENSORBOARD_LOGS_PATH}:/workspace/tensorboard_logs" \ + -v "${HOST_TOKENIZER_MODEL_PATH}:/workspace/tokenizer_model" \ + -v "$(dirname "${HOST_DATA_PREFIX}"):/workspace/data_dir" \ + --workdir /workspace/megatron-lm \ + $PYTORCH_IMAGE \ + bash examples/llama/train_llama3_8b_h100_fp8.sh \ + /workspace/checkpoints \ + /workspace/tensorboard_logs \ + /workspace/tokenizer_model \ + "/workspace/data_dir/$(basename "${HOST_DATA_PREFIX}")" \ + 2>&1 | tee "${HOST_TENSORBOARD_LOGS_PATH}/training_custom_$(date +'%y-%m-%d_%H-%M-%S').log" +``` + +## 4. Configuration + + +Default parallelism strategy: +- Tensor Parallel: 1 +- Pipeline Parallel: 1 +- Context Parallel: 2 + +Llama-3-8B architecture: +- 32 layers +- Hidden size: 4096 +- FFN hidden size: 14336 +- Attention heads: 32 +- Query groups: 8 +- Sequence length: 8192 +- RMSNorm normalization with SwiGLU and RoPE + +Key training parameters: +- Micro-batch size: 1 +- Global batch size: 128 +- Learning rate: 1.5e-4 +- Min learning rate: 1.0e-5 +- Weight decay: 0.1 +- FP8 format: hybrid + +You can modify these parameters directly in the `train_llama3_8b_h100_fp8.sh` script. + +This configuration follows those defined in NeMo Framework's performance scripts, which can be found at [https://github.com/NVIDIA/NeMo/tree/main/scripts/performance](https://github.com/NVIDIA/NeMo/tree/main/scripts/performance). + +### FP8 Performance + +| Model | #-GPUs | GBS | MBS | Seq Length | TP | PP | CP | VP | EP | GA | Tokens/sec/GPU | TFLOP/sec/GPU | +|-------|--------|-----|-----|------------|----|----|----|----|----|----|----------------|---------------| +| LLAMA3-8B | 8 | 128 | 1 | 8192 | 1 | 1 | 2 | 1 | 1 | 32 | 13812 | 800 | +| LLAMA3-70B | 64 | 128 | 1 | 8192 | 4 | 8 | 1 | 5 | 1 | 64 | 1621 | 780 | +| LLAMA3-405B | 1024 | 512 | 1 | 8192 | 8 | 8 | 2 | 8 | 1 | 64 | 315 | 834 | + +Legend: +- GBS: Global Batch Size +- MBS: Micro Batch Size +- TP: Tensor Parallel size +- PP: Pipeline Parallel size +- CP: Context Parallel size +- VP: Virtual Pipeline stages +- EP: Expert Parallel size +- GA: Gradient Accumulation steps + +As NeMo uses Megatron-Core, for the latest performance benchmarks, please refer to the official [NeMo documentation](https://docs.nvidia.com/nemo-framework/user-guide/latest/performance/performance_summary.html). + +## 5. Test Datasets + + +Recommended datasets: +1. 
**WikiText-103**: https://huggingface.co/datasets/Salesforce/wikitext + +Preprocess datasets: +```bash +python "${HOST_MEGATRON_LM_DIR}/tools/preprocess_data.py" \ + --input your_dataset.json \ + --output-prefix test_dataset \ + --tokenizer-type HuggingFaceTokenizer \ + --tokenizer-model /path/to/tokenizer.model \ + --append-eod +``` + +## 6. FP8 Training Considerations + + +- **Hardware**: Requires NVIDIA Hopper, Ada, or Blackwell GPUs for FP8 support + +- **Troubleshooting**: If you encounter NaN values or instability with FP8 training, please refer to [Transformer Engine](https://github.com/NVIDIA/TransformerEngine). diff --git a/examples/llama/train_llama3_8b_h100_fp8.sh b/examples/llama/train_llama3_8b_h100_fp8.sh new file mode 100644 index 0000000000..f791996308 --- /dev/null +++ b/examples/llama/train_llama3_8b_h100_fp8.sh @@ -0,0 +1,195 @@ +#!/bin/bash + +# Environment variables for performance tuning +export CUDA_DEVICE_MAX_CONNECTIONS=${CUDA_DEVICE_MAX_CONNECTIONS:-1} +#export LOG_LEVEL=${LOG_LEVEL:-INFO} +#export NCCL_IB_TIMEOUT=${NCCL_IB_TIMEOUT:-19} +#export NVTE_FWD_LAYERNORM_SM_MARGIN=${NVTE_FWD_LAYERNORM_SM_MARGIN:-16} +#export NVTE_BWD_LAYERNORM_SM_MARGIN=${NVTE_BWD_LAYERNORM_SM_MARGIN:-16} +#export NCCL_P2P_NET_CHUNKSIZE=${NCCL_P2P_NET_CHUNKSIZE:-2097152} +#export NCCL_AVOID_RECORD_STREAMS=${NCCL_AVOID_RECORD_STREAMS:-1} + +CHECKPOINT_PATH=${1:-"checkpoints/llama3_8b_fp8"} +TENSORBOARD_LOGS_PATH=${2:-"tensorboard_logs/llama3_8b_fp8"} +TOKENIZER_ARG=${3:-"MOCK"} # Path to tokenizer model, or "MOCK" +DATA_ARG=${4:-"MOCK"} # Data prefix, or "MOCK" + +# Create directories if they don't exist +mkdir -p "$(dirname "$CHECKPOINT_PATH")" +mkdir -p "$(dirname "$TENSORBOARD_LOGS_PATH")" + +# Distributed training setup +GPUS_PER_NODE=8 +NUM_NODES=1 +MASTER_ADDR=${MASTER_ADDR:-localhost} +MASTER_PORT=${MASTER_PORT:-6000} +NODE_RANK=${NODE_RANK:-0} +WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) + +# Path to the pretrain_gpt.py script, assuming this script is run from the root of the Megatron-LM repository +PRETRAIN_SCRIPT_PATH="pretrain_gpt.py" + +# Fixed model and training parameters +TP_SIZE=1 +CP_SIZE=1 +PP_SIZE=1 +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=128 +NUM_LAYERS=32 +DTYPE="fp8" +SEQ_LENGTH=8192 +MAX_POSITION_EMBEDDINGS=8192 + +# Data cache path (useful for both mock and real data) +DATA_CACHE_PATH="${PWD}/benchmark_cache_llama3_8b_fp8" +mkdir -p "$DATA_CACHE_PATH" + +DISTRIBUTED_ARGS=( + --nproc_per_node $GPUS_PER_NODE + --nnodes $NUM_NODES + --node_rank $NODE_RANK + --master_addr $MASTER_ADDR + --master_port $MASTER_PORT +) + +MODEL_ARGS=( + --use-mcore-models + --num-layers $NUM_LAYERS + --hidden-size 4096 + --ffn-hidden-size 14336 + --num-attention-heads 32 + --group-query-attention + --num-query-groups 8 + --kv-channels 128 + --seq-length $SEQ_LENGTH + --max-position-embeddings $MAX_POSITION_EMBEDDINGS + --position-embedding-type rope + --rotary-base 1000000 + --rotary-percent 1.0 + --attention-dropout 0.0 + --hidden-dropout 0.0 + --swiglu + --init-method-std 0.0134 + --attention-backend fused + --apply-layernorm-1p + --untie-embeddings-and-output-weights + --disable-bias-linear +) + +TRAINING_ARGS=( + --micro-batch-size $MICRO_BATCH_SIZE + --global-batch-size $GLOBAL_BATCH_SIZE + --train-samples 1953125000 + --lr-decay-samples 1949218748 + --lr-warmup-samples 3906252 + --lr 0.00015 + --min-lr 0.00001 + --decoupled-lr 5.0e-4 # Specific to decoupled AdamW, ensure optimizer is compatible + --decoupled-min-lr 4.5e-5 # Specific to decoupled AdamW + --lr-decay-style cosine + 
--clip-grad 1.0 + --weight-decay 0.1 + --adam-beta1 0.9 + --adam-beta2 0.95 + --bf16 + --grad-reduce-in-bf16 + --cross-entropy-loss-fusion + --calculate-per-token-loss + --manual-gc + --empty-unused-memory-level 1 + --exit-duration-in-mins 235 +) + +# Conditional arguments based on DTYPE (FP8) +DTYPE_ARGS=() +if [[ "$DTYPE" == "fp8" ]]; then + DTYPE_ARGS+=( + "--fp8-format hybrid" + "--fp8-amax-history-len 1024" + "--fp8-amax-compute-algo max" + "--fp8-param-gather" + ) +fi + +# Model parallelism arguments +MODEL_PARALLEL_ARGS=( + --tensor-model-parallel-size $TP_SIZE + --context-parallel-size $CP_SIZE + # --pipeline-model-parallel-size $PP_SIZE # Not explicitly set in llama script options, assume 1 if not multi-node PP + --sequence-parallel # Always enable sequence parallelism with TP_SIZE=2 +) + +# Distributed Data Parallel (DDP) arguments +# From original script's ddp_args +DDP_ARGS=( + --use-distributed-optimizer + --overlap-grad-reduce + --overlap-param-gather +) +TRAINING_ARGS+=("${DDP_ARGS[@]}") + + +# Data arguments (conditional for mock vs real data) +DATA_ARGS_LIST=() +if [[ "$TOKENIZER_ARG" == "MOCK" ]] || [[ "$DATA_ARG" == "MOCK" ]] || [[ -z "$TOKENIZER_ARG" ]]; then + DATA_ARGS_LIST+=( + "--mock-data" + "--tokenizer-type NullTokenizer" + "--vocab-size 128256" + "--data-cache-path ${DATA_CACHE_PATH}" + "--tiktoken-pattern v2" + "--split '99,1,0'" + "--no-create-attention-mask-in-dataloader" + "--no-mmap-bin-files" + "--num-workers 1" + ) +else + # Settings for real data + DATA_ARGS_LIST+=( + "--data-path $DATA_ARG" + "--tokenizer-type HuggingFaceTokenizer" + "--tokenizer-model $TOKENIZER_ARG" + "--data-cache-path ${DATA_CACHE_PATH}" + "--split '99,1,0'" + "--no-create-attention-mask-in-dataloader" + "--no-mmap-bin-files" + "--num-workers 1" + # Note: --vocab-size might be inferred by HuggingFaceTokenizer or might need to be explicit. + "--vocab-size 128256" + ) +fi + +EVAL_AND_LOGGING_ARGS=( + --log-interval 1 + --eval-iters 32 + --eval-interval 100 + --save-interval 1000 + --log-throughput + --profile + --profile-step-start 4 + --profile-step-end 6 + --ckpt-format torch_dist + --distributed-timeout-minutes 60 + --save "$CHECKPOINT_PATH" + --load "$CHECKPOINT_PATH" + --tensorboard-dir "$TENSORBOARD_LOGS_PATH" +) + +# Ensure pretrain_gpt.py is found +if [ ! -f "$PRETRAIN_SCRIPT_PATH" ]; then + echo "Error: pretrain_gpt.py not found at $PRETRAIN_SCRIPT_PATH" + echo "Please ensure you are running this script from the root of the Megatron-LM repository, and pretrain_gpt.py is present." 
+ exit 1 +fi + +# Run the training command +torchrun ${DISTRIBUTED_ARGS[@]} \ + "$PRETRAIN_SCRIPT_PATH" \ + ${MODEL_ARGS[@]} \ + ${TRAINING_ARGS[@]} \ + ${DTYPE_ARGS[@]} \ + ${MODEL_PARALLEL_ARGS[@]} \ + ${DATA_ARGS_LIST[@]} \ + ${EVAL_AND_LOGGING_ARGS[@]} + +set +x \ No newline at end of file diff --git a/examples/mamba/.gitignore b/examples/mamba/.gitignore new file mode 100644 index 0000000000..940f4797e4 --- /dev/null +++ b/examples/mamba/.gitignore @@ -0,0 +1,4 @@ +checkpoints/ +data-cache/ +tensorboard/ +triton-cache/ diff --git a/examples/mamba/Dockerfile b/examples/mamba/Dockerfile new file mode 100644 index 0000000000..2e194095b7 --- /dev/null +++ b/examples/mamba/Dockerfile @@ -0,0 +1,32 @@ +FROM nvcr.io/nvidia/pytorch:24.01-py3 + +RUN pip uninstall -y triton && \ + pip install triton==2.1.0 sentencepiece==0.1.99 flask-restful + +# The causal-conv1d and mamba-ssm packages below are built from scratch here +# (which takes significant time) because there are no wheels available on PyPI +# for these relatively newer versions of the packages that are compatible with +# the older NGC-variant PyTorch version (e.g. version 2.2.0.dev231106) that we +# are using (in the NGC base container). Generally, if the package is not +# compatible with the PyTorch version, then it will generate a Python import +# error. The package authors tend to only release wheels for new versions of +# these pacakges which are compatible with the versions of regular PyTorch and +# NGC-variant PyTorch that are newer at the time of release. So, to use newer +# versions of these packages with relatively older versions of the NGC PyTorch +# container, we tend to have to build the packages from scratch. + +RUN cd /tmp && \ + git clone https://github.com/Dao-AILab/causal-conv1d.git && \ + cd causal-conv1d && \ + git checkout v1.2.2.post1 && \ + CAUSAL_CONV1D_FORCE_BUILD=TRUE pip install . && \ + cd .. && \ + rm -rf causal-conv1d + +RUN cd /tmp && \ + git clone https://github.com/state-spaces/mamba.git && \ + cd mamba && \ + git checkout v2.0.3 && \ + MAMBA_FORCE_BUILD=TRUE pip install . && \ + cd .. && \ + rm -rf mamba diff --git a/examples/mamba/README.md b/examples/mamba/README.md new file mode 100644 index 0000000000..f8f6d79683 --- /dev/null +++ b/examples/mamba/README.md @@ -0,0 +1,94 @@ +# Mamba-based Language Models + +## Introduction + +This document is an entrypoint into the code used for +[An Empirical Study of Mamba-based Language Models](https://arxiv.org/abs/2406.07887). + +We are releasing the parameters for some of the models described in that +technical report via +[HuggingFace](https://huggingface.co/collections/nvidia/ssms-666a362c5c3bb7e4a6bcfb9c). +The code in the `main` branch is no longer compatible with the `Mamba2-*` +checkpoints. You can load them using the +[fixed snapshot of the code used for the technical report](https://github.com/NVIDIA/Megatron-LM/tree/ssm/examples/mamba). + +## Installation + +Create and run a Docker container using the [Dockerfile](./Dockerfile). + +``` +docker build -t your_image_name:your_tag . +docker run --gpus all -it --rm \ + -v /path/to/megatron:/workspace/megatron \ + -v /path/to/dataset:/workspace/dataset \ + -v /path/to/checkpoints:/workspace/checkpoints \ + -w /workspace/megatron/examples/mamba \ + your_image_name:your_tag +``` + +## Train + +[`train.sh`](./train.sh) is an example pretraining script, showing how to run on +a single node. Select between 800M-scale and 8B-scale models by setting the +`MODEL_SCALE` variable. 
The 8B-scale hybrid model architecture is the same as +the one described in the technical report. + +## Text Generation + +Use [`run_text_gen_server_8b.sh`](./run_text_gen_server_8b.sh) to start a text +generation server using an 8B hybrid checkpoint. This is configured to run the +8B hybrid model described in the technical report, with tensor model parallel +set to 1. + +The arguments in the script will need to be changed if using a checkpoint with a +different model parallel configuration or other differences, such as model +architecture. For example, to run the 8B pure Mamba-2 model, change +`--hybrid-attention-ratio` and `--hybrid-mlp-ratio` to 0.0, or remove them. + +Use [`run_text_gen_server_8b_gpt3.sh`](./run_text_gen_server_8b_gpt3.sh) to start +a text generation server using the 8B reference Transformer checkpoint. + +## Checkpoint Formats + +For inference, the model must be configured to match the checkpoint file used, +including the hybrid layer configuration and model parallel configuration. + +If you need to convert a hybrid checkpoint file to a different tensor parallel +or pipeline parallel size, use +[the hybrid conversion script](../../tools/checkpoint/hybrid_conversion.py). +There is an example run command at the end of that file. + +Before running that script, you will need to set `PYTHONPATH` to include the +root directory of your Megatron-LM repository clone. + +``` +export PYTHONPATH=:PYTHONPATH +``` + +## Hybrid Options + +`--hybrid-attention-ratio ATT` specifies a target ratio of attention layers +to total layers. For example, 4 attention layers out of 48 total layers is +specified by `--hybrid-attention-ratio 0.08`. + +`--hybrid-mlp-ratio MLP` specifies a target ratio of MLP layers to total +layers. For example, 24 MLP layers out of 48 total layers is specified by +`--hybrid-mlp-ratio 0.5`. + +* (`ATT` + `MLP`) must be less than or equal to 1.0. +* (1.0 - `ATT` - `MLP`) is the hybrid mamba ratio, the ratio of mamba layers to +total layers. +* `ATT` = `MLP` = 0 is a pure Mamba model. +* `ATT` = `MLP` = 0.5 is a transfomer model. + +If either `ATT` or `MLP` is greater than 0.0 or if `--hybrid-override-pattern` +is specified, the logfile will include information about the hybrid layer +pattern used. `--hybrid-override-pattern` can be used to specify a different +pattern than the default, algorithmically-generated one. + +## Mamba vs Mamba-2 + +This codebase currently only supports Mamba-2, and not the original version of +Mamba. However, the +[fixed snapshot of the code used for the technical report](https://github.com/NVIDIA/Megatron-LM/tree/ssm/examples/mamba) +can be configured to run the original version of Mamba. 
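+As a rough worked example of the hybrid options above (illustration only: the
+`hybrid_layer_counts` helper below is hypothetical and not part of this codebase,
+and the real layer pattern is generated algorithmically or supplied via
+`--hybrid-override-pattern`), the ratios translate into approximate layer counts
+as follows:
+
+```python
+# Sketch of the arithmetic implied by the hybrid ratios; not the actual
+# layer-placement algorithm used by Megatron-LM.
+def hybrid_layer_counts(total_layers: int, att_ratio: float, mlp_ratio: float):
+    assert att_ratio + mlp_ratio <= 1.0
+    num_attention = round(att_ratio * total_layers)
+    num_mlp = round(mlp_ratio * total_layers)
+    num_mamba = total_layers - num_attention - num_mlp
+    return num_attention, num_mlp, num_mamba
+
+# 48 layers with --hybrid-attention-ratio 0.08 --hybrid-mlp-ratio 0.5
+# gives roughly 4 attention, 24 MLP, and 20 Mamba-2 layers.
+print(hybrid_layer_counts(48, 0.08, 0.5))
+```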
diff --git a/examples/mamba/run_text_gen_server_8b.sh b/examples/mamba/run_text_gen_server_8b.sh new file mode 100755 index 0000000000..8d3137f244 --- /dev/null +++ b/examples/mamba/run_text_gen_server_8b.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Use: ./run_text_gen_server_8b.sh +# To launch the client: python ../../tools/text_generation_cli.py + +CHECKPOINT_PATH=$1 +TOKENIZER_PATH=$2 + +DISTRIBUTED_ARGS="--nproc_per_node 1 \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +export NCCL_IB_SL=1 +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_IB_TIMEOUT=19 +export NCCL_IB_QPS_PER_CONNECTION=4 + +export TRITON_CACHE_DIR="./triton-cache/" +export TRITON_CACHE_MANAGER="megatron.core.ssm.triton_cache_manager:ParallelFileCacheManager" + +torchrun $DISTRIBUTED_ARGS ../../tools/run_mamba_text_generation_server.py \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + --untie-embeddings-and-output-weights \ + --num-layers 56 \ + --hidden-size 4096 \ + --load ${CHECKPOINT_PATH} \ + --num-attention-heads 32 \ + --group-query-attention \ + --num-query-groups 8 \ + --hybrid-attention-ratio 0.08 \ + --hybrid-mlp-ratio 0.5 \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --disable-bias-linear \ + --normalization RMSNorm \ + --seq-length 4096 \ + --max-position-embeddings 4096 \ + --position-embedding-type none \ + --tokenizer-type GPTSentencePieceTokenizer \ + --tokenizer-model ${TOKENIZER_PATH} \ + --distributed-backend nccl \ + --distributed-timeout-minutes 1440 \ + --bf16 \ + --micro-batch-size 1 \ + --use-mcore-models \ + --spec megatron.core.models.mamba.mamba_layer_specs mamba_stack_spec \ + --seed 42 diff --git a/examples/mamba/run_text_gen_server_8b_gpt3.sh b/examples/mamba/run_text_gen_server_8b_gpt3.sh new file mode 100644 index 0000000000..5413b245ed --- /dev/null +++ b/examples/mamba/run_text_gen_server_8b_gpt3.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Use: ./run_text_gen_server_8b_gpt3.sh +# To launch the client: python ../../tools/text_generation_cli.py + +CHECKPOINT_PATH=$1 +TOKENIZER_PATH=$2 + +DISTRIBUTED_ARGS="--nproc_per_node 1 \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +export NCCL_IB_SL=1 +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_IB_TIMEOUT=19 +export NCCL_IB_QPS_PER_CONNECTION=4 + +torchrun $DISTRIBUTED_ARGS ../../tools/run_text_generation_server.py \ + --tensor-model-parallel-size 1 \ + --pipeline-model-parallel-size 1 \ + --use-flash-attn \ + --apply-layernorm-1p \ + --untie-embeddings-and-output-weights \ + --num-layers 32 \ + --hidden-size 4096 \ + --load ${CHECKPOINT_PATH} \ + --num-attention-heads 32 \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --disable-bias-linear \ + --seq-length 4096 \ + --max-position-embeddings 4096 \ + --position-embedding-type rope \ + --rotary-percent 0.5 \ + --squared-relu \ + --tokenizer-type GPTSentencePieceTokenizer \ + --tokenizer-model ${TOKENIZER_PATH} \ + --distributed-backend nccl \ + --distributed-timeout-minutes 1440 \ + --bf16 \ + --micro-batch-size 1 \ + --use-mcore-models \ + --transformer-impl local \ + --seed 42 diff --git a/examples/mamba/train.sh b/examples/mamba/train.sh new file mode 100755 index 0000000000..3952a997d4 --- /dev/null +++ b/examples/mamba/train.sh @@ -0,0 +1,105 @@ +#!/bin/bash + +# Use: ./train.sh + +MODEL_SCALE="800M" # or "8B" + +case "${MODEL_SCALE}" in + "800M") + TENSOR_MODEL_PARALLEL_SIZE=1 + NUM_LAYERS=48 + HIDDEN_SIZE=1024 + NUM_ATTENTION_HEADS=16 + GLOBAL_BATCH_SIZE=32 + ;; + "8B") + 
TENSOR_MODEL_PARALLEL_SIZE=4 + NUM_LAYERS=56 + HIDDEN_SIZE=4096 + NUM_ATTENTION_HEADS=32 + GLOBAL_BATCH_SIZE=8 + ;; + *) + echo "Invalid version specified" + exit 1 + ;; +esac + +DATA_PATH=$1 +TOKENIZER_PATH=$2 + +export NCCL_IB_SL=1 +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_IB_TIMEOUT=19 +export NCCL_IB_QPS_PER_CONNECTION=4 + +CHECKPOINT_DIR="./checkpoints" +DATACACHE_DIR="./data-cache" +TENSORBOARD_DIR="./tensorboard" + +mkdir -p ${CHECKPOINT_DIR} +mkdir -p ${DATACACHE_DIR} +mkdir -p ${TENSORBOARD_DIR} + +export TRITON_CACHE_DIR="./triton-cache/" +export TRITON_CACHE_MANAGER="megatron.core.ssm.triton_cache_manager:ParallelFileCacheManager" + +SEQ_LEN=4096 +TRAIN_SAMPLES=73242188 # 300B tokens / 4096 +LR_WARMUP_SAMPLES=50000 +LR_DECAY_SAMPLES=73192188 # TRAIN_SAMPLES - LR_WARMUP_SAMPLES + +options=" \ + --tensor-model-parallel-size ${TENSOR_MODEL_PARALLEL_SIZE} \ + --sequence-parallel \ + --pipeline-model-parallel-size 1 \ + --use-distributed-optimizer \ + --overlap-param-gather \ + --overlap-grad-reduce \ + --untie-embeddings-and-output-weights \ + --init-method-std 0.02 \ + --position-embedding-type none \ + --num-layers ${NUM_LAYERS} \ + --hidden-size ${HIDDEN_SIZE} \ + --num-attention-heads ${NUM_ATTENTION_HEADS} \ + --group-query-attention \ + --num-query-groups 8 \ + --hybrid-attention-ratio 0.08 \ + --hybrid-mlp-ratio 0.5 \ + --seq-length ${SEQ_LEN} \ + --max-position-embeddings ${SEQ_LEN} \ + --train-samples ${TRAIN_SAMPLES} \ + --lr-warmup-samples ${LR_WARMUP_SAMPLES} \ + --lr-decay-samples ${LR_DECAY_SAMPLES} \ + --save ${CHECKPOINT_DIR} \ + --load ${CHECKPOINT_DIR} \ + --data-path ${DATA_PATH} \ + --data-cache-path ${DATACACHE_DIR} \ + --split 99,1,0 \ + --tokenizer-type GPTSentencePieceTokenizer \ + --tokenizer-model ${TOKENIZER_PATH} \ + --distributed-backend nccl \ + --micro-batch-size 4 \ + --global-batch-size ${GLOBAL_BATCH_SIZE} \ + --lr 2.5e-4 \ + --min-lr 2.5e-5 \ + --lr-decay-style cosine \ + --weight-decay 0.1 \ + --clip-grad 1.0 \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --disable-bias-linear \ + --normalization RMSNorm \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --log-interval 10 \ + --save-interval 2000 \ + --eval-interval 2000 \ + --eval-iters 32 \ + --bf16 \ + --use-mcore-models \ + --spec megatron.core.models.mamba.mamba_layer_specs mamba_stack_spec \ + --no-create-attention-mask-in-dataloader \ + --tensorboard-dir ${TENSORBOARD_DIR}" + +torchrun --nproc_per_node 8 ../../pretrain_mamba.py ${options} diff --git a/examples/merge_mp_bert.sh b/examples/merge_mp_bert.sh deleted file mode 100755 index 1383433284..0000000000 --- a/examples/merge_mp_bert.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -TENSOR_MODEL_PARALLEL_SIZE=2 - -VOCAB_FILE=bert-vocab.txt -CHECKPOINT_PATH=checkpoints/bert_345m - -WORLD_SIZE=$TENSOR_MODEL_PARALLEL_SIZE python tools/merge_mp_partitions.py \ - --model-type BERT \ - --tensor-model-parallel-size $TENSOR_MODEL_PARALLEL_SIZE \ - --tokenizer-type BertWordPieceLowerCase \ - --vocab-file $VOCAB_FILE \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --seq-length 512 \ - --max-position-embeddings 512 \ - --load $CHECKPOINT_PATH diff --git a/examples/mimo/__init__.py b/examples/mimo/__init__.py new file mode 100644 index 0000000000..0519ecba6e --- /dev/null +++ b/examples/mimo/__init__.py @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/examples/mimo/avlm_inference.py b/examples/mimo/avlm_inference.py new file mode 100644 index 0000000000..14ca2068ca --- /dev/null +++ 
b/examples/mimo/avlm_inference.py @@ -0,0 +1,244 @@ +import argparse +import os +from pathlib import Path +from typing import Union + +# hf path +import requests +import torch +from PIL import Image +from transformers import AutoProcessor +from transformers import AutoTokenizer +import soundfile as sf +import io +import numpy as np +import scipy.signal as signal + +from examples.mimo.model_providers.llava_avlm import model_provider_llava_avlm +from megatron.core import dist_checkpointing, parallel_state, tensor_parallel +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.training import print_rank_0 +from examples.mimo.data.utils.calculate_audio_tokens import calculate_num_audio_tokens + +def init_distributed(tp_size: int = 1, pp_size: int = 1): + if torch.distributed.is_initialized(): + return + rank = int(os.environ.get("LOCAL_RANK", 0)) + world_size = int(os.environ.get("WORLD_SIZE", 1)) + torch.cuda.set_device(rank % torch.cuda.device_count()) + torch.distributed.init_process_group("nccl", rank=rank, world_size=world_size) + parallel_state.initialize_model_parallel(tp_size, pp_size) + +def get_input_data( + processor: AutoProcessor, + image_processor: AutoProcessor, + audio_processor: AutoProcessor, + audio_path: str, + image_path: str, + prompt: str, + device: Union[int, str] = 0): + """ + Prepare inputs for the MIMO model forward pass. + """ + + def read_audio(audio_path): + """Process audio file and return tensor.""" + with open(audio_path, 'rb') as f: + audio_bytes = f.read() + audio_io = io.BytesIO(audio_bytes) + waveform, sample_rate = sf.read(audio_io) + + # Resample if needed + fixed_sample_rate = 16000 + if sample_rate != fixed_sample_rate: + num_samples = int(len(waveform) * fixed_sample_rate / sample_rate) + waveform = signal.resample(waveform, num_samples) + + # Convert to tensor + audio_tensor = torch.from_numpy(waveform).float() + return audio_tensor + + def read_image(image_path): + """Process image file and return tensor.""" + with open(image_path, 'rb') as f: + image_bytes = f.read() + image_io = io.BytesIO(image_bytes) + image = Image.open(image_io) + image_tensor = torch.from_numpy(np.array(image)).permute(2, 0, 1) # Convert to CxHxW format + image_tensor = image_tensor.float() / 255.0 # rescale to [0,1] range + return image_tensor + + + # read audio and image + audio_tensor = read_audio(audio_path) + image_tensor = read_image(image_path) + + # set up prompt + conversation = [ + { + "role": "user", + "content": [ + {"type": "text", "text": prompt}, + ], + } + ] + prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) + + # process audio + processed_audios = audio_processor(audio_tensor, sampling_rate=16000) + processed_audios = torch.tensor(processed_audios["input_features"]) + processed_audios = processed_audios.squeeze(0) # remove batch dim + num_audio_tokens = calculate_num_audio_tokens(audio_tensor.unsqueeze(0), "openai/whisper-base") + audios_seq_lengths = torch.tensor(num_audio_tokens) + prompt = prompt.replace("