Commit 2d2fca6c authored by jerrrrry

Initial commit
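# GitLab CI configuration. Keys prefixed with "." (e.g. .build_rules, .build_image) are hidden
# template jobs that never run on their own; concrete jobs pull them in via `extends`.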
.build_rules:
rules:
- when: on_success
stage: test
.build_image:
extends: [.build_rules, .dind_rules]
stage: build
tags:
- arch/amd64
- origin/jet-fleet
- env/prod
- ${TAG}
services:
- name: docker:24.0.5-dind
variables:
HEALTHCHECK_TCP_PORT: "2376"
timeout: 180m
variables:
DOCKER_HOST: tcp://docker:2376
DOCKER_TLS_CERTDIR: "/certs"
DOCKER_TLS_VERIFY: 1
DOCKER_CERT_PATH: "$DOCKER_TLS_CERTDIR/client"
TAG: purpose/builder-large
STAGE: jet
MCORE_BACKWARDS_REF: core_r0.14.0
KUBERNETES_SERVICE_MEMORY_REQUEST: 90Gi
KUBERNETES_SERVICE_MEMORY_LIMIT: 90Gi
SHARED_PATH: /builds/$CI_PROJECT_PATH/shared
script:
- eval PUBLISH_COMMIT=$PUBLISH_COMMIT
- apk add bash curl git
- export TE_GIT_REF=$TE_GIT_REF
- export GH_TOKEN=$GH_TOKEN
- bash .gitlab/scripts/build.sh
- git fetch origin $MCORE_BACKWARDS_REF
- MCORE_BACKWARDS_COMMIT=$(git rev-parse FETCH_HEAD)
- echo "MCORE_MR_COMMIT=$CI_COMMIT_SHA" | tee -a build.env
- echo "MCORE_BACKWARDS_COMMIT=$MCORE_BACKWARDS_COMMIT" | tee -a build.env
- cat build.env
retry:
max: 2
artifacts:
reports:
dotenv: build.env
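# The build.env dotenv artifact exposes MCORE_MR_COMMIT and MCORE_BACKWARDS_COMMIT to downstream jobs.
# The jobs below instantiate .build_image through a parallel matrix, one entry per image variant
# (IMAGE name, Dockerfile, base image).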
test:build_image:
extends: [.build_image]
parallel:
matrix:
- IMAGE: CI_MCORE_LTS_IMAGE
FILE: Dockerfile.ci.lts
BASE_IMAGE: nvcr.io/nvidia/pytorch:24.01-py3
- IMAGE: CI_MCORE_DEV_IMAGE
FILE: Dockerfile.ci.dev
BASE_IMAGE: nvcr.io/nvidia/pytorch:25.09-py3
- IMAGE: UTILITY_IMAGE
FILE: Dockerfile.linting
BASE_IMAGE: python:3.10
test:build_nemo_image:
extends: [.build_image]
variables:
IMAGE: CI_NEMO_IMAGE
FILE: Dockerfile.ci.nemo
BASE_IMAGE: nvcr.io/nvidian/nemo:nightly
rules:
- if: $FUNCTIONAL_TEST == "yes" || $INTEGRATION_TEST == "yes" || $CI_COMMIT_BRANCH == "ci-rebuild-mcore-nemo-image"
when: on_success
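# .test_rules gates all test-stage jobs: they are skipped entirely on publish pipelines
# (PUBLISH == "yes") and otherwise run on success of their dependencies.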
.test_rules:
rules:
- if: $PUBLISH == "yes"
when: never
- when: on_success
stage: test
include:
- template: Security/Secret-Detection.gitlab-ci.yml
wait_for_resources:
extends: [.test_rules]
needs:
- test:linting_formatting
- test:linting_copyright
- job: test:linting_secret_detection
optional: true
- test:build_image
image: python:3.10
timeout: 7 days
variables:
KUBERNETES_SERVICE_MEMORY_REQUEST: 32Gi
KUBERNETES_SERVICE_MEMORY_LIMIT: 32Gi
KUBERNETES_SERVICE_CPU_REQUEST: 8
KUBERNETES_SERVICE_CPU_LIMIT: 12
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
script:
- env
- pip install --no-cache-dir python-gitlab click
- export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE}
- export GITLAB_ENDPOINT
- export NUM_CONCURRENT_JOBS
- python tests/test_utils/python_scripts/wait_for_resources.py --pipeline-id $CI_PIPELINE_ID --target-branch $CI_MERGE_REQUEST_TARGET_BRANCH_NAME
rules:
- if: $CI_MERGE_REQUEST_LABELS =~ /fast-track/
when: never
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: on_success
- when: never
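# test:unit_tests_configure generates the child-pipeline definitions for the unit-test matrix:
# one YAML file per (environment, tag) pair, produced by generate_jet_trigger_job.py and passed
# downstream as artifacts.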
test:unit_tests_configure:
extends: [.test_rules]
needs:
- test:build_image
- job: wait_for_resources
optional: true
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
before_script:
- git rm -r tests/test_utils/local_recipes || true
- git submodule add --force https://gitlab-ci-token:${CI_JOB_TOKEN}@${GITLAB_ENDPOINT}/ADLR/megatron-lm-convergence-tests.git tests/test_utils/local_recipes
- ls tests/test_utils/local_recipes
script:
- env
- set -x
- |
A100_CLUSTER=$([[ "$CLUSTER_A100" != "" ]] && echo $CLUSTER_A100 || echo $DEFAULT_A100_CLUSTER)
H100_CLUSTER=$([[ "$CLUSTER_H100" != "" ]] && echo $CLUSTER_H100 || echo $DEFAULT_H100_CLUSTER)
- |
ARGS=(
"--scope unit-tests"
"--n-repeat ${UNIT_TEST_REPEAT}"
"--time-limit $(( UNIT_TEST_TIMEOUT * 60 ))"
"--test-cases all"
"--cluster dgxh100_coreweave"
"--platform dgx_h100"
"--partition batch"
"--container-image ${UTILITY_IMAGE}"
"--container-tag ${CI_PIPELINE_ID}"
"--dependent-job test:unit_tests_configure"
"--slurm-account ${CI_SLURM_ACCOUNT}"
"--no-enable-warmup"
)
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment "lts" \
--tag "legacy" \
--output-path "unit-test-job-lts-legacy.yaml"
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment "lts" \
--tag "latest" \
--output-path "unit-test-job-lts-latest.yaml"
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment "dev" \
--tag "legacy" \
--output-path "unit-test-job-dev-legacy.yaml"
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment "dev" \
--tag "latest" \
--output-path "unit-test-job-dev-latest.yaml"
rules:
- if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true"
allow_failure: true
when: on_success
- if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0'
when: on_success
artifacts:
paths:
- unit-test-job-dev-legacy.yaml
- unit-test-job-dev-latest.yaml
- unit-test-job-lts-legacy.yaml
- unit-test-job-lts-latest.yaml
- tests/test_utils/local_recipes
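# .unit_tests_run triggers a child pipeline from one of the generated artifacts. The concrete
# job's ENVIRONMENT/TAG variables select the file, e.g. ENVIRONMENT=dev and TAG=latest resolve
# to unit-test-job-dev-latest.yaml; strategy: depend makes the parent job mirror the child
# pipeline's status.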
.unit_tests_run:
needs:
- test:linting_formatting
- test:linting_copyright
- job: test:linting_secret_detection
optional: true
- test:unit_tests_configure
- test:build_image
extends: [.test_rules]
trigger:
include:
- artifact: unit-test-job-$ENVIRONMENT-$TAG.yaml
job: test:unit_tests_configure
strategy: depend
variables:
RO_API_TOKEN: $PAT
CONTAINER_TAG: $CI_PIPELINE_ID
CI_MCORE_LTS_IMAGE: $CI_MCORE_LTS_IMAGE
GITLAB_ENDPOINT: $GITLAB_ENDPOINT
PARENT_PIPELINE_ID: $CI_PIPELINE_ID
MCORE_MR_COMMIT: $MCORE_MR_COMMIT
MCORE_BACKWARDS_COMMIT: $MCORE_BACKWARDS_COMMIT
inherit:
variables: true
rules:
- if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true"
allow_failure: true
when: on_success
- if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0'
when: on_success
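# The four jobs below are the concrete (environment, tag) combinations. The two mcore(legacy)
# variants add extra never-rules so they are skipped for dev- and release-branch-targeted merge
# requests, among other cases.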
test:unit_tests_pyt(DEV)_mcore(legacy):
extends: [.unit_tests_run]
variables:
ENVIRONMENT: dev
TAG: legacy
rules:
- if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == 'dev'
when: never
- if: $CI_COMMIT_BRANCH == 'ci-dev-unit-test-extended'
when: never
- if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME =~ /^core_r/
when: never
- if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != 'main'
when: never
- if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true"
allow_failure: true
when: on_success
- if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0'
when: on_success
test:unit_tests_pyt(LTS)_mcore(legacy):
extends: [.unit_tests_run]
variables:
ENVIRONMENT: lts
TAG: legacy
rules:
- if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == 'dev'
when: never
- if: $CI_COMMIT_BRANCH == 'ci-dev-unit-test-extended'
when: never
- if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME =~ /^core_r/
when: never
- if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != 'main'
when: never
- if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true"
allow_failure: true
when: on_success
- if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0'
when: on_success
test:unit_tests_pyt(DEV)_mcore(latest):
extends: [.unit_tests_run]
variables:
ENVIRONMENT: dev
TAG: latest
test:unit_tests_pyt(LTS)_mcore(latest):
extends: [.unit_tests_run]
variables:
ENVIRONMENT: lts
TAG: latest
test:unit_tests_notify:
extends: [.test_rules]
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
needs:
- test:unit_tests_pyt(DEV)_mcore(latest)
- test:unit_tests_pyt(LTS)_mcore(latest)
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
script:
- env
- |
if [[ "$CI_COMMIT_BRANCH" == *dev* ]]; then
export WEBHOOK_URL=${MCORE_NOTIFICATION_HOOK_DEV}
else
export WEBHOOK_URL=${MCORE_NOTIFICATION_HOOK}
fi
- export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE}
- export GITLAB_ENDPOINT
- export TAG_TEAM=$([[ "$CI_COMMIT_BRANCH" == "main" ]] && echo "1" || echo "0")
- export TEAM_SLUG=$SLACK_ADMIN
- |
python tests/test_utils/python_scripts/notify.py \
--pipeline-id "${CI_PIPELINE_ID}" \
--check-for unit-tests \
--pipeline-context "unit-tests-extended" \
--pipeline-created-at "${CI_PIPELINE_CREATED_AT}"
artifacts:
when: always
paths:
- scripts
rules:
- if: $CI_PIPELINE_SOURCE == "schedule" && ($CI_COMMIT_BRANCH == "ci-unit-test-extended" || $CI_COMMIT_BRANCH == "ci-dev-unit-test-extended")
when: always
- when: never
test:linting_docs_build:
extends: [.test_rules]
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
needs: [test:build_image]
script:
- cd ..
- rm -rf documentation && git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@${GITLAB_ENDPOINT}/nemo-megatron-core-tme/documentation.git
- mv megatron-lm/ documentation/
- cd documentation/
- ./repo docs
test:linting_formatting:
extends: [.test_rules]
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
needs: [test:build_image]
variables:
GIT_STRATEGY: "clone"
script:
- |
if [[ "$CI_PIPELINE_SOURCE" != "merge_request_event" ]]; then
exit 0
fi
- set +e
- git fetch origin main:main
- |
if [[ "$CI_MERGE_REQUEST_PROJECT_PATH" == "$CI_MERGE_REQUEST_SOURCE_PROJECT_PATH" ]]; then
bash tools/autoformat.sh
set -e
git fetch origin $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
git checkout $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
git config --global user.email "mcore-bot@nvidia.com"
git config --global user.name "Mcore Bot"
git remote set-url origin "https://gitlab-ci-token:${PAT}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git"
git add -A .
git commit -m "chore: Format files" || true
git push -u origin $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
fi
- env
- BASE_REF="$CI_MERGE_REQUEST_TARGET_BRANCH_NAME" CHECK_ONLY=true SKIP_DOCS=$([[ "$CI_MERGE_REQUEST_LABELS" == *"Skip docs"* ]] && echo "true" || echo "false") bash tools/autoformat.sh
test:linting_copyright:
extends: [.test_rules]
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
needs: [test:build_image]
script:
- git fetch origin main
- bash tools/copyright.sh
# Override from template
secret_detection:
rules:
- when: never
# Inherit and modify template
test:linting_secret_detection:
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
extends: [".secret-analyzer"]
needs: [test:build_image]
variables:
GIT_DEPTH: 0
SECRET_DETECTION_LOG_OPTIONS: ${CI_MERGE_REQUEST_DIFF_BASE_SHA}..${CI_COMMIT_SHA}
allow_failure: false
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- when: never
script:
- apk add jq
- /analyzer run
- |
if [[ $(cat gl-secret-detection-report.json | jq '.vulnerabilities | length > 0') == true ]]; then
echo "Atleast one vulnerability has been found"
cat gl-secret-detection-report.json | jq '.'
exit 1
fi
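# test:unit_tests_x_coverage_report merges the coverage data downloaded from the unit-test child
# pipelines, prints a combined report (the `coverage:` regex extracts the TOTAL percentage), and
# publishes coverage.xml as a Cobertura report.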
test:unit_tests_x_coverage_report:
extends: [.test_rules]
needs:
- job: test:unit_tests_pyt(DEV)_mcore(latest)
- job: test:unit_tests_pyt(LTS)_mcore(latest)
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
script:
- env
- export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE}
- export GITLAB_ENDPOINT
- python tests/test_utils/python_scripts/download_coverage_results.py --pipeline-id ${CI_PIPELINE_ID}
- coverage combine --keep $(ls coverage_results/*/coverage_report)
- coverage report
- coverage xml
coverage: "/TOTAL.+ ([0-9]{1,3}%)/"
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
rules:
- if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true"
allow_failure: true
when: on_success
- if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0'
when: on_success
test:safe_imports:
extends: [.test_rules]
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/builder-large
- team/megatron
services:
- name: docker:24.0.5-dind
variables:
HEALTHCHECK_TCP_PORT: "2376"
variables:
KUBERNETES_SERVICE_MEMORY_REQUEST: 32Gi
KUBERNETES_SERVICE_MEMORY_LIMIT: 32Gi
KUBERNETES_SERVICE_CPU_REQUEST: 8
KUBERNETES_SERVICE_CPU_LIMIT: 12
image:
name: python:3.11
entrypoint: [""]
needs: [test:build_image]
script:
- env
- python -m ensurepip --upgrade
- python -m pip install --no-cache-dir -e .
- python -m pip install --no-cache-dir click
- python .gitlab/scripts/check_imports.py --package-name megatron.core
rules:
- if: $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != 'main' && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != 'dev'
when: never
- if: $UNIT_TEST == 'yes' && $CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED != "true"
allow_failure: true
when: on_success
- if: $UNIT_TEST == 'yes' && $UNIT_TEST_REPEAT != '0'
when: on_success
retry:
max: 2
.integration_tests_rules:
stage: integration_tests
rules:
- if: $INTEGRATION_TEST == "yes"
when: on_success
- when: never
default:
id_tokens:
VAULT_JWT_TOKEN:
aud: https://stg.vault.nvidia.com
include:
- project: dl/jet/gitlab-templates
ref: main
file: downstreams.yml
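# Integration tests only run when INTEGRATION_TEST == "yes". integration:configure mirrors the
# unit-test configure job: it generates one child-pipeline YAML per (environment, platform), and
# the .integration_run jobs trigger them per cluster.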
integration:configure:
needs:
- test:build_image
- job: test:unit_tests_pyt(DEV)_mcore(latest)
optional: true
- job: test:unit_tests_pyt(LTS)_mcore(latest)
optional: true
- job: test:build_nemo_image
extends: [.integration_tests_rules]
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
before_script:
- git rm -r tests/test_utils/local_recipes || true
- git submodule add --force https://gitlab-ci-token:${CI_JOB_TOKEN}@${GITLAB_ENDPOINT}/ADLR/megatron-lm-convergence-tests.git tests/test_utils/local_recipes
- ls tests/test_utils/local_recipes
script:
- set -x
- |
A100_CLUSTER=$([[ "$CLUSTER_A100" != "" ]] && echo $CLUSTER_A100 || echo $DEFAULT_A100_CLUSTER)
H100_CLUSTER=$([[ "$CLUSTER_H100" != "" ]] && echo $CLUSTER_H100 || echo $DEFAULT_H100_CLUSTER)
- |
ARGS=(
"--scope $INTEGRATION_TEST_SCOPE"
"--n-repeat 1"
"--time-limit $INTEGRATION_TEST_TIME_LIMIT"
"--test-cases $INTEGRATION_TEST_CASES"
"--container-image ${UTILITY_IMAGE}"
"--container-tag ${CI_PIPELINE_ID}"
"--slurm-account ${CI_SLURM_ACCOUNT}"
"--no-enable-warmup"
"--dependent-job integration:configure"
"--enable-lightweight-mode"
)
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment dev \
--platform dgx_a100 \
--cluster $A100_CLUSTER \
--output-path "functional-test-job-dev-A100.yaml"
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment dev \
--platform dgx_h100 \
--cluster $H100_CLUSTER \
--output-path "functional-test-job-dev-H100.yaml"
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment lts \
--platform dgx_a100 \
--cluster $A100_CLUSTER \
--output-path "functional-test-job-lts-A100.yaml"
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment lts \
--platform dgx_h100 \
--cluster $H100_CLUSTER \
--output-path "functional-test-job-lts-H100.yaml"
artifacts:
paths:
- functional-test-job-lts-A100.yaml
- functional-test-job-lts-H100.yaml
- functional-test-job-dev-H100.yaml
- functional-test-job-dev-A100.yaml
- tests/test_utils/local_recipes
.integration_run:
needs:
- integration:configure
- test:build_image
- job: wait_for_resources
optional: true
extends: [.integration_tests_rules]
trigger:
include:
- artifact: functional-test-job-$ENVIRONMENT-$CLUSTER.yaml
job: integration:configure
strategy: depend
variables:
RO_API_TOKEN: $PAT
CONTAINER_TAG: $CI_PIPELINE_ID
CI_MCORE_LTS_IMAGE: $CI_MCORE_LTS_IMAGE
GITLAB_ENDPOINT: $GITLAB_ENDPOINT
PARENT_PIPELINE_ID: $CI_PIPELINE_ID
DASHBOARD_ENDPOINT: $DASHBOARD_ENDPOINT
MCORE_MR_COMMIT: $MCORE_MR_COMMIT
MCORE_BACKWARDS_COMMIT: $MCORE_BACKWARDS_COMMIT
inherit:
variables: true
integration:run_lts_dgx_a100:
extends: [.integration_run]
variables:
ENVIRONMENT: lts
CLUSTER: A100
integration:run_lts_dgx_h100:
extends: [.integration_run]
variables:
ENVIRONMENT: lts
CLUSTER: H100
integration:run_dev_dgx_a100:
extends: [.integration_run]
variables:
ENVIRONMENT: dev
CLUSTER: A100
integration:run_dev_dgx_h100:
extends: [.integration_run]
variables:
ENVIRONMENT: dev
CLUSTER: H100
.functional_tests_rules:
stage: functional_tests
rules:
- if: $FUNCTIONAL_TEST == "yes"
when: on_success
- when: never
default:
id_tokens:
VAULT_JWT_TOKEN:
aud: https://stg.vault.nvidia.com
include:
- project: dl/jet/gitlab-templates
ref: main
file: downstreams.yml
functional:configure:
needs:
- test:build_image
- test:build_nemo_image
- job: test:unit_tests_pyt(DEV)_mcore(latest)
optional: true
- job: test:unit_tests_pyt(LTS)_mcore(latest)
optional: true
- job: integration:run_lts_dgx_a100
optional: true
- job: integration:run_dev_dgx_a100
optional: true
- job: integration:run_lts_dgx_h100
optional: true
- job: integration:run_dev_dgx_h100
optional: true
extends: [.functional_tests_rules]
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
before_script:
- git rm -r tests/test_utils/local_recipes || true
- git submodule add --force https://gitlab-ci-token:${CI_JOB_TOKEN}@${GITLAB_ENDPOINT}/ADLR/megatron-lm-convergence-tests.git tests/test_utils/local_recipes
- ls tests/test_utils/local_recipes
script:
- set -x
- |
A100_CLUSTER=$([[ "$CLUSTER_A100" != "" ]] && echo $CLUSTER_A100 || echo $DEFAULT_A100_CLUSTER)
H100_CLUSTER=$([[ "$CLUSTER_H100" != "" ]] && echo $CLUSTER_H100 || echo $DEFAULT_H100_CLUSTER)
- |
RECORD_CHECKPOINTS=$([[ "$CI_MERGE_REQUEST_LABELS" == *"Record checkpoints"* || "$FUNCTIONAL_TEST_RECORD_CHECKPOINTS" == "yes" ]] && echo "true" || echo "false")
- |
if [[ "$FUNCTIONAL_TEST_SCOPE" == "release" || "$FUNCTIONAL_TEST_SCOPE" == "weekly" ]]; then
FUNCTIONAL_TEST_NAME=$(eval echo $FUNCTIONAL_TEST_NAME)
RELEASE_ARGS=(
"--run-name"
$FUNCTIONAL_TEST_NAME
"--wandb-experiment"
$(echo $FUNCTIONAL_TEST_NAME | tr '/' '-')
)
else
RELEASE_ARGS=()
fi
- |
ARGS=(
"--scope $FUNCTIONAL_TEST_SCOPE"
"--n-repeat $FUNCTIONAL_TEST_REPEAT"
"--time-limit $FUNCTIONAL_TEST_TIME_LIMIT"
"--test-cases $FUNCTIONAL_TEST_CASES"
"--container-image ${UTILITY_IMAGE}"
"--container-tag ${CI_PIPELINE_ID}"
"--dependent-job functional:configure"
"--record-checkpoints ${RECORD_CHECKPOINTS}"
"--slurm-account ${CI_SLURM_ACCOUNT}"
"--no-enable-warmup"
)
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment dev \
--platform dgx_a100 \
--cluster $A100_CLUSTER \
--output-path "functional-test-job-dev-A100.yaml" \
${RELEASE_ARGS[@]}
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment dev \
--platform dgx_h100 \
--cluster $H100_CLUSTER \
--output-path "functional-test-job-dev-H100.yaml" \
${RELEASE_ARGS[@]}
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment lts \
--platform dgx_a100 \
--cluster $A100_CLUSTER \
--output-path "functional-test-job-lts-A100.yaml" \
${RELEASE_ARGS[@]}
- |
export PYTHONPATH=$(pwd)
python tests/test_utils/python_scripts/generate_jet_trigger_job.py \
${ARGS[@]} \
--environment lts \
--platform dgx_h100 \
--cluster $H100_CLUSTER \
--output-path "functional-test-job-lts-H100.yaml" \
${RELEASE_ARGS[@]}
artifacts:
paths:
- functional-test-job-lts-A100.yaml
- functional-test-job-lts-H100.yaml
- functional-test-job-dev-A100.yaml
- functional-test-job-dev-H100.yaml
- tests/test_utils/local_recipes
.functional_run:
needs:
- functional:configure
- test:build_image
extends: [.functional_tests_rules]
trigger:
include:
- artifact: functional-test-job-$ENVIRONMENT-$CLUSTER.yaml
job: functional:configure
strategy: depend
variables:
RO_API_TOKEN: $PAT
CONTAINER_TAG: $CI_PIPELINE_ID
CI_MCORE_LTS_IMAGE: $CI_MCORE_LTS_IMAGE
GITLAB_ENDPOINT: $GITLAB_ENDPOINT
PARENT_PIPELINE_ID: $CI_PIPELINE_ID
DASHBOARD_ENDPOINT: $DASHBOARD_ENDPOINT
MCORE_MR_COMMIT: $MCORE_MR_COMMIT
MCORE_BACKWARDS_COMMIT: $MCORE_BACKWARDS_COMMIT
CLUSTER: $CLUSTER
inherit:
variables: true
functional:run_lts_dgx_a100:
extends: [.functional_run]
variables:
ENVIRONMENT: lts
CLUSTER: A100
functional:run_lts_dgx_h100:
extends: [.functional_run]
variables:
ENVIRONMENT: lts
CLUSTER: H100
functional:run_dev_dgx_a100:
extends: [.functional_run]
variables:
ENVIRONMENT: dev
CLUSTER: A100
functional:run_dev_dgx_h100:
extends: [.functional_run]
variables:
ENVIRONMENT: dev
CLUSTER: H100
functional:run_nemo:
extends: [.functional_tests_rules]
trigger:
project: 'dl/joc/nemo-ci'
branch: main-mirror
strategy: depend
inherit:
variables: true
variables:
MCORE_COMMIT: $CI_COMMIT_SHA
TEST_NEMO2_MODULE: 'True'
ALLOW_FAILURE_DEPENDENCY: 'True'
TESTS_TO_RUN_ON_THIS_COMMIT: nightly
rules:
- if: $FUNCTIONAL_TEST == "yes"
when: manual
allow_failure: true
- when: never
functional:x_notify:
extends: [.functional_tests_rules]
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
needs:
- functional:run_lts_dgx_a100
- functional:run_dev_dgx_a100
- functional:run_lts_dgx_h100
- functional:run_dev_dgx_h100
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
variables:
RO_API_TOKEN: ${PROJECT_ACCESS_TOKEN_MCORE}
CONTEXT: $FUNCTIONAL_TEST_SCOPE
script:
- env
- |
if [[ "$CI_COMMIT_BRANCH" == *dev* ]]; then
export WEBHOOK_URL=${MCORE_NOTIFICATION_HOOK_DEV}
else
export WEBHOOK_URL=${MCORE_NOTIFICATION_HOOK}
fi
- export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE}
- export GITLAB_ENDPOINT
- export CONTEXT=$FUNCTIONAL_TEST_SCOPE
- export TAG_TEAM=$([[ "$CI_COMMIT_BRANCH" == "main" ]] && echo "1" || echo "0")
- export TEAM_SLUG=$SLACK_ADMIN
- |
python tests/test_utils/python_scripts/notify.py \
--pipeline-id "${CI_PIPELINE_ID}" \
--check-for functional-tests \
--pipeline-context $CONTEXT \
--pipeline-created-at "${CI_PIPELINE_CREATED_AT}"
artifacts:
when: always
paths:
- scripts
rules:
- if: ($CI_PIPELINE_SOURCE == "schedule" || $CI_COMMIT_BRANCH == "main") && $FUNCTIONAL_TEST == "yes"
when: always
- when: never
functional:x_download_golden_values:
extends: [.functional_tests_rules]
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
script:
- env
- export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE}
- export GITLAB_ENDPOINT
- python tests/test_utils/python_scripts/download_golden_values.py --pipeline-id ${CI_PIPELINE_ID}
artifacts:
paths:
- tests/
rules:
- if: $FUNCTIONAL_TEST == "yes"
when: manual
allow_failure: true
- when: never
.publish_common_freeze:
stage: publish
rules:
- if: ($CI_COMMIT_BRANCH == "main") && $PUBLISH == "yes" && $PUBLISH_SCOPE == "code-freeze"
when: manual
- when: never
.publish_common_release:
stage: publish
rules:
- if: $CI_PIPELINE_SOURCE == "web" && $PUBLISH == "yes" && $PUBLISH_SCOPE == "release"
when: manual
- if: $PUBLISH == "yes" && $PUBLISH_SCOPE == "release"
when: on_success
- when: never
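# Publish stage. The publish:test_release_* jobs below are dry runs (PUBLISH_DRYRUN: "yes"):
# they build and verify the wheels but only print the upload, GitHub release, and notification
# commands. The publish:release_* jobs further down re-use them with PUBLISH_DRYRUN set to "no".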
publish:test_release_pypi_build_wheel:
extends: [.test_rules]
stage: publish
image:
name: ${IMAGE}
entrypoint: [""]
services:
- name: docker:24.0.5-dind
variables:
HEALTHCHECK_TCP_PORT: "2376"
needs: [test:build_image]
parallel:
matrix:
- PACKAGE: megatron-core
PLATFORM: arm64
IMAGE: quay.io/pypa/manylinux_2_28_aarch64
- PACKAGE: megatron-core
PLATFORM: amd64
IMAGE: quay.io/pypa/manylinux_2_28_x86_64
- PACKAGE: megatron-fsdp
IMAGE: quay.io/pypa/manylinux_2_28_x86_64
PLATFORM: amd64
tags:
- arch/${PLATFORM}
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/builder-small
- team/megatron
variables:
PY_ENV: pytorch_25.03
KUBERNETES_SERVICE_MEMORY_REQUEST: 16Gi
KUBERNETES_SERVICE_MEMORY_LIMIT: 16Gi
PUBLISH_DRYRUN: "yes"
KUBERNETES_SERVICE_CPU_REQUEST: 4
KUBERNETES_SERVICE_CPU_LIMIT: 8
before_script:
- env
- eval PUBLISH_COMMIT=$PUBLISH_COMMIT
- env
- git fetch origin $PUBLISH_COMMIT
- git checkout $PUBLISH_COMMIT
script:
- set -x
- echo $PUBLISH_DRYRUN
- |
if [ "$PACKAGE" = "megatron-core" ]; then
ROOTDIR="megatron/core"
BUILD_DIR="."
elif [ "$PACKAGE" = "megatron-fsdp" ]; then
ROOTDIR="megatron/core/distributed/fsdp/src/megatron_fsdp"
BUILD_DIR="megatron/core/distributed/fsdp/src"
else
echo Unknown package: $PACKAGE
exit 1
fi
- |
if [ "$PUBLISH_DRYRUN" = "yes" ]; then
PRE_RELEASE=$(sed -n "s/.*PRE_RELEASE = '\(.*\)'/\1/p" $ROOTDIR/package_info.py)
sed -i "/^PRE_RELEASE/c\PRE_RELEASE = '${PRE_RELEASE}.dev$((RANDOM % 900000 + 100000))'" $ROOTDIR/package_info.py
fi
- pushd $BUILD_DIR
- /opt/python/cp310-cp310/bin/python -m build
- /opt/python/cp311-cp311/bin/python -m build
- /opt/python/cp312-cp312/bin/python -m build
- /opt/python/cp313-cp313/bin/python -m build
- USE_DIST_DIR=0
- auditwheel repair dist/*.whl || USE_DIST_DIR=1
- |
if [ "$USE_DIST_DIR" != "1" ]; then
rm -rf dist/*.whl
fi
- popd
- pushd $ROOTDIR
- EXPECTED_RELEASE_NUMBER=$(/opt/python/cp312-cp312/bin/python -c "import package_info; print(package_info.__version__)")
- popd
- echo "EXPECTED_RELEASE_NUMBER_${PLATFORM}_${PACKAGE//-/_}=$EXPECTED_RELEASE_NUMBER" | tee -a build.env
- mkdir -p wheelhouse/
- |
if [ "$PACKAGE" = "megatron-fsdp" ]; then
cp -a megatron/core/distributed/fsdp/src/dist/* wheelhouse/
fi
- |
ls -al wheelhouse/ || true
ls -al dist/ || true
artifacts:
paths:
- megatron/core/package_info.py
- wheelhouse/
- dist/
- megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py
reports:
dotenv: build.env
retry:
max: 2
publish:test_release_pypi_test_wheel:
extends: [.test_rules]
stage: publish
image:
name: python:3.11
entrypoint: [""]
needs:
- job: publish:test_release_pypi_build_wheel
optional: true
parallel:
matrix:
- PACKAGE: megatron-core
PLATFORM: arm64
- PACKAGE: megatron-core
PLATFORM: amd64
- PACKAGE: megatron-fsdp
PLATFORM: amd64
services:
- name: docker:24.0.5-dind
variables:
HEALTHCHECK_TCP_PORT: "2376"
tags:
- arch/${PLATFORM}
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/builder-small
- team/megatron
variables:
KUBERNETES_SERVICE_MEMORY_REQUEST: 16Gi
KUBERNETES_SERVICE_MEMORY_LIMIT: 16Gi
KUBERNETES_SERVICE_CPU_REQUEST: 4
KUBERNETES_SERVICE_CPU_LIMIT: 8
GIT_STRATEGY: none
PUBLISH_DRYRUN: "yes"
script:
- set -x
- env
- rm -rf megatron
- pip install -U --no-cache-dir pip
- |
if [ "$PACKAGE" = "megatron-core" ]; then
ROOTPATH="megatron.core"
WHEEL_PREFIX="megatron_core"
elif [ "$PACKAGE" = "megatron-fsdp" ]; then
ROOTPATH="megatron_fsdp"
WHEEL_PREFIX="megatron_fsdp"
else
echo Unknown package: $PACKAGE
exit 1
fi
- |
ls -al wheelhouse/ || true
ls -al dist/ || true
ls -al megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py || true
ls -al megatron/core/distributed/fsdp/src/wheelhouse/ || true
ls -al megatron/core/distributed/fsdp/src/dist/ || true
- |
if [ "$PACKAGE" = "megatron-core" ]; then
if [[ "$PLATFORM" == "arm64" ]]; then
for file in wheelhouse/$WHEEL_PREFIX*cp311*aarch64.whl; do
pip install --no-cache-dir "$file"
done
else
for file in wheelhouse/$WHEEL_PREFIX*cp311*x86_64.whl; do
pip install --no-cache-dir "$file"
done
fi
else
pip install --no-cache-dir wheelhouse/$WHEEL_PREFIX*.whl
fi
- RELEASE_NUMBER=$(python -c "import $ROOTPATH; print($ROOTPATH.__version__)")
- |
var=EXPECTED_RELEASE_NUMBER_${PLATFORM}_${PACKAGE//-/_}
EXPECTED_RELEASE_NUMBER=$(echo "${!var}")
test "$EXPECTED_RELEASE_NUMBER" == "$RELEASE_NUMBER"
- echo "$var=$RELEASE_NUMBER" | tee -a build.env
artifacts:
reports:
dotenv: build.env
paths:
- megatron/core/package_info.py
- wheelhouse/
- dist/
- megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py
retry:
max: 2
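# publish:test_release_version_bump computes the next version from package_info.py. A minimal
# sketch of the bump logic below, assuming a current PRE_RELEASE of 'rc1' and PATCH of 0:
#   PRERELEASE=rc1  -> NEXT_PATCH=0 (unchanged), NEXT_PRERELEASE=rc$((1 + 1)) = rc2
#   PRERELEASE=''   -> NEXT_PATCH=$((0 + 1)) = 1, NEXT_PRERELEASE left unset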
publish:test_release_version_bump:
needs: [publish:test_release_pypi_test_wheel]
extends: [.test_rules]
image: nentangso/alpine-git-curl-jq
stage: publish
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
before_script:
- eval PUBLISH_COMMIT=$PUBLISH_COMMIT
- eval PUBLISH_VERSION_BUMP_BRANCH=$PUBLISH_VERSION_BUMP_BRANCH
- git fetch origin $PUBLISH_COMMIT
- git checkout $PUBLISH_COMMIT
variables:
PUBLISH_DRYRUN: "yes"
script:
- set -x
- env
- echo $PUBLISH_DRYRUN
- ROOTDIR="megatron/core"
- MAJOR=$(cat $ROOTDIR/package_info.py | awk '/^MAJOR = /' | awk -F"= " '{print $2}')
- MINOR=$(cat $ROOTDIR/package_info.py | awk '/^MINOR = /' | awk -F"= " '{print $2}')
- PATCH=$(cat $ROOTDIR/package_info.py | awk '/^PATCH = /' | awk -F"= " '{print $2}')
- PRERELEASE=$(cat $ROOTDIR/package_info.py | awk '/^PRE_RELEASE = /' | awk -F"= " '{print $2}' | tr -d '"' | tr -d "'")
- |
if [[ "$PRERELEASE" != "" ]]; then
NEXT_PATCH=$PATCH
NEXT_PRERELEASE=rc$((${PRERELEASE#rc} + 1))
else
NEXT_PATCH=$((${PATCH} + 1))
NEXT_PRERELEASE=$NEXT_PRERELEASE
fi
- sed -i "/^PATCH/c\PATCH = $NEXT_PATCH" $ROOTDIR/package_info.py
- sed -i "/^PRE_RELEASE/c\PRE_RELEASE = '$NEXT_PRERELEASE'" $ROOTDIR/package_info.py
- ROOTDIR="megatron/core/distributed/fsdp/src/megatron_fsdp"
- MAJOR=$(cat $ROOTDIR/package_info.py | awk '/^MAJOR = /' | awk -F"= " '{print $2}')
- MINOR=$(cat $ROOTDIR/package_info.py | awk '/^MINOR = /' | awk -F"= " '{print $2}')
- PATCH=$(cat $ROOTDIR/package_info.py | awk '/^PATCH = /' | awk -F"= " '{print $2}')
- PRERELEASE=$(cat $ROOTDIR/package_info.py | awk '/^PRE_RELEASE = /' | awk -F"= " '{print $2}' | tr -d '"' | tr -d "'")
- |
if [[ "$PRERELEASE" != "" ]]; then
NEXT_PATCH=$PATCH
NEXT_PRERELEASE=rc$((${PRERELEASE#rc} + 1))
else
NEXT_PATCH=$((${PATCH} + 1))
NEXT_PRERELEASE=$NEXT_PRERELEASE
fi
- sed -i "/^PATCH/c\PATCH = $NEXT_PATCH" $ROOTDIR/package_info.py
- sed -i "/^PRE_RELEASE/c\PRE_RELEASE = '$NEXT_PRERELEASE'" $ROOTDIR/package_info.py
- git config --global user.email "mcore-bot@nvidia.com"
- git config --global user.name "Mcore Bot"
- git remote set-url origin "https://gitlab-ci-token:${PROJECT_ACCESS_TOKEN_MCORE}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git"
- |
CMD=$(
cat <<'EOF'
git fetch origin $PUBLISH_VERSION_BUMP_BRANCH && \
git switch $PUBLISH_VERSION_BUMP_BRANCH && \
git add megatron/core/package_info.py && \
git add megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py && \
git commit -m "chore: Version bump" && \
git push origin $PUBLISH_VERSION_BUMP_BRANCH
EOF
)
- |
if [[ "$PUBLISH_DRYRUN" == "yes" ]]; then
echo "$CMD"
else
eval "$CMD"
fi
publish:test_release_pypi_push_wheel:
extends: [.test_rules]
image: python:3.11
stage: publish
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
needs:
- job: publish:test_release_pypi_test_wheel
optional: true
- job: publish:test_release_version_bump
optional: true
variables:
GIT_STRATEGY: none
PUBLISH_DRYRUN: "yes"
timeout: 3m
script:
- set -x
- echo $PUBLISH_DRYRUN
- |
if [ "$PUBLISH_DRYRUN" = "yes" ]; then
REPOSITORY=testpypi
export TWINE_USERNAME=$TWINE_TEST_USERNAME
export TWINE_PASSWORT=$TWINE_TEST_PASSWORD
else
REPOSITORY=pypi
export TWINE_USERNAME=$TWINE_PROD_USERNAME
export TWINE_PASSWORT=$TWINE_PROD_PASSWORD
fi
- ls -al dist/
- ls -al wheelhouse/
- pip install twine
- |
CMD=$(echo -E "twine upload --verbose -u $TWINE_USERNAME -p $TWINE_PASSWORT --repository $REPOSITORY wheelhouse/* dist/*")
if [[ "$PUBLISH_DRYRUN" != "yes" ]]; then
eval "$CMD"
fi
publish:test_release_github:
extends: [.test_rules]
needs:
- job: publish:test_release_pypi_test_wheel
optional: true
- job: publish:test_release_version_bump
optional: true
stage: publish
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
image: nentangso/alpine-git-curl-jq
before_script:
- eval PUBLISH_COMMIT=$PUBLISH_COMMIT
- git fetch origin $PUBLISH_COMMIT
- git checkout $PUBLISH_COMMIT
variables:
PUBLISH_DRYRUN: "yes"
script:
- set -x
- env
- apk add --no-cache bash
- |
bash <<'EOF'
set -x
echo $PUBLISH_DRYRUN
PLATFORM=amd64
PACKAGE=megatron-core
var=EXPECTED_RELEASE_NUMBER_${PLATFORM}_${PACKAGE//-/_}
RELEASE_NUMBER=$(echo "${!var}")
NAME="NVIDIA Megatron Core $RELEASE_NUMBER"
IS_PRERELEASE=$([[ "$RELEASE_NUMBER" == *rc* ]] && echo "true" || echo "false")
if [[ "$IS_PRERELEASE" == "true" ]]; then
DATE=$(date +"%Y-%m-%d")
CHANGELOG="Prerelease: $NAME ($DATE)"
else
CHANGELOG=$(awk '/^## '"$NAME"'/{flag=1; next} /^## /{flag=0} flag' CHANGELOG.md)
CHANGELOG=$(echo "$CHANGELOG" | sed '/./!d')
fi
PAYLOAD=$(jq -nc \
--arg TAG_NAME "core_v${RELEASE_NUMBER}" \
--arg CI_COMMIT_SHA "$PUBLISH_COMMIT" \
--arg NAME "$NAME" \
--arg BODY "$CHANGELOG" \
--argjson PRERELEASE "$IS_PRERELEASE" \
'{
"tag_name": $TAG_NAME,
"target_commitish": $CI_COMMIT_SHA,
"name": $NAME,
"body": $BODY,
"draft": false,
"prerelease": $PRERELEASE,
"generate_release_notes": false
}'
)
echo -E "$PAYLOAD" | tee -a payload.txt
cat payload.txt
CMD=$(echo -E 'curl -L \
-X POST \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer '"$GH_TOKEN"'" \
-H "X-GitHub-Api-Version: 2022-11-28" \
https://api.github.com/repos/NVIDIA/Megatron-LM/releases \
-d @payload.txt
')
if [[ "$PUBLISH_DRYRUN" == "yes" ]]; then
echo -E "$CMD"
else
eval "$CMD"
fi
EOF
publish:test_release_notify:
needs: [publish:test_release_pypi_test_wheel, publish:test_release_pypi_push_wheel, publish:test_release_github]
extends: [.test_rules]
image: badouralix/curl-jq
stage: publish
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
variables:
PUBLISH_DRYRUN: "yes"
script:
- set -x
- env
- apk add --no-cache bash
- |
bash <<'EOF'
set -x
echo $PUBLISH_DRYRUN
PLATFORM=amd64
PACKAGE=megatron-core
var=EXPECTED_RELEASE_NUMBER_${PLATFORM}_${PACKAGE//-/_}
RELEASE_NUMBER=$(echo "${!var}")
URL="https://github.com/NVIDIA/Megatron-LM/releases/tag/core_v$RELEASE_NUMBER"
cat <<MSG > message.json
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "Releasebot 🤖: Megatron-Core released <${URL}|core_v${RELEASE_NUMBER}> 🚀"
}
}
]
}
MSG
cat message.json
CMD=$(echo curl \
-X POST \
-H "Content-type: application/json" \
-d @message.json ${MCORE_NOTIFICATION_HOOK_MAIN}
)
if [[ "$PUBLISH_DRYRUN" == "yes" ]]; then
echo "$CMD"
else
eval "$CMD"
fi
EOF
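# The publish:release_* jobs below extend their test_release counterparts, gate themselves on
# .publish_common_release (manual on web-triggered release pipelines), and set PUBLISH_DRYRUN
# to "no" so the upload, GitHub release, and notification commands actually execute.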
publish:release_pypi_build_wheel:
extends: [publish:test_release_pypi_build_wheel, .publish_common_release]
dependencies: []
variables:
PUBLISH_DRYRUN: "no"
publish:release_pypi_test_wheel:
extends: [publish:test_release_pypi_test_wheel, .publish_common_release]
needs: [publish:release_pypi_build_wheel]
variables:
PUBLISH_DRYRUN: "no"
publish:release_version_bump:
needs: [publish:release_pypi_test_wheel]
extends: [publish:test_release_version_bump, .publish_common_release]
variables:
PUBLISH_DRYRUN: "no"
publish:release_pypi_push_wheel:
extends: [publish:test_release_pypi_push_wheel, .publish_common_release]
needs: [publish:release_pypi_test_wheel, publish:release_version_bump]
dependencies: [publish:release_pypi_test_wheel]
variables:
PUBLISH_DRYRUN: "no"
publish:release_github:
extends: [publish:test_release_github, .publish_common_release]
needs: [publish:release_pypi_test_wheel, publish:release_version_bump]
dependencies: [publish:release_pypi_test_wheel]
variables:
PUBLISH_DRYRUN: "no"
publish:release_notify:
needs: [publish:release_pypi_test_wheel, publish:release_pypi_push_wheel, publish:release_github]
extends: [publish:test_release_notify, .publish_common_release]
dependencies: [publish:release_pypi_test_wheel]
variables:
PUBLISH_DRYRUN: "no"
publish:docs:
extends: [.publish_common_release]
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
before_script:
- eval PUBLISH_COMMIT=$PUBLISH_COMMIT
- git fetch origin '+refs/merge-requests/*:refs/remotes/merge-requests/*'
- git fetch origin $PUBLISH_COMMIT
- git checkout $PUBLISH_COMMIT
script:
- cd ..
- rm -rf documentation && git clone --recursive https://gitlab-ci-token:${PAT}@${GITLAB_ENDPOINT}/nemo-megatron-core-tme/documentation.git
- cd documentation/megatron-lm
- git config --global user.email "mcore-bot@nvidia.com"
- git config --global user.name "Mcore Bot"
- git fetch origin '+refs/merge-requests/*:refs/remotes/merge-requests/*'
- git fetch origin $PUBLISH_COMMIT
- git checkout $PUBLISH_COMMIT
- cd ..
- git add megatron-lm
- |
git commit -m 'feat: Bump mcore'
- git push
rules:
- if: '$CI_COMMIT_BRANCH == "main" && $CI_PIPELINE_SOURCE == "push"'
allow_failure: true
- when: never
publish:upload_statistics:
stage: publish
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
needs:
- job: test:unit_tests_pyt(DEV)_mcore(legacy)
optional: true
- job: test:unit_tests_pyt(LTS)_mcore(legacy)
optional: true
- job: test:unit_tests_pyt(DEV)_mcore(latest)
- job: test:unit_tests_pyt(LTS)_mcore(latest)
- job: functional:run_lts_dgx_a100
optional: true
- job: functional:run_lts_dgx_h100
optional: true
- job: functional:run_dev_dgx_a100
optional: true
- job: functional:run_dev_dgx_h100
optional: true
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
script:
- env
- export RO_API_TOKEN=${PROJECT_ACCESS_TOKEN_MCORE}
- export GITLAB_ENDPOINT
- export DASHBOARD_ENDPOINT
- python tests/test_utils/python_scripts/dashboard.py --pipeline-id ${CI_PIPELINE_ID}
rules:
- if: ($CI_MERGE_REQUEST_EVENT_TYPE == 'merged_result' || $CI_MERGE_REQUEST_EVENT_TYPE == 'merge_train') && ($UNIT_TEST == "yes" || $INTEGRATION_TEST == "yes" || $FUNCTIONAL_TEST == "yes")
when: always
allow_failure: true
- when: never
public:review_reminder:
stage: publish
image: ${UTILITY_IMAGE}:${CI_PIPELINE_ID}
script:
- export GITLAB_ENDPOINT
- export RO_API_TOKEN=${PAT}
- export SLACK_WEBHOOK_URL=${SLACK_REMINDER_HOOK}
- export SLACK_API_TOKEN=${SLACK_API_TOKEN}
- python tests/test_utils/python_scripts/auto_reminder.py
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
rules:
- if: $CI_COMMIT_BRANCH == "ci-review-reminder" && $PUBLISH == "yes" && $PUBLISH_SCOPE == "review-reminder"
- when: never
publish:code_freeze:
extends: [.publish_common_freeze]
image: ${CI_MCORE_LTS_IMAGE}:${CI_PIPELINE_ID}
needs: [test:build_image]
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
script:
- git fetch origin main
- git config --global user.email "mcore-bot@nvidia.com"
- git config --global user.name "Mcore Bot"
- git remote set-url origin "https://gitlab-ci-token:${PAT}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git"
- sed -i "/^PRE_RELEASE/c\PRE_RELEASE = ''" megatron/core/package_info.py
- VERSION=$(python -c "from megatron import core; print(core.__version__)")
- RELEASE_BRANCH=core_r$VERSION
- git switch --force-create $RELEASE_BRANCH origin/main
- git push -u origin $RELEASE_BRANCH
- |
MESSAGE='{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "Releasebot 🤖: Megatron Core has been frozen 🎉 to branch `'"$RELEASE_BRANCH"'`"
}
}
]
}'
- |
curl -X POST -H "Content-type: application/json" --data "$MESSAGE" ${MCORE_NOTIFICATION_HOOK_MAIN}
- git switch main
- git switch --force-create bot/chore/bump-version
- git add megatron/core/package_info.py
- |
git commit -m "chore: adjust version version"
- git push -u origin bot/chore/bump-version
- |
curl \
--header "PRIVATE-TOKEN: $PAT" \
--url https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests \
-d "source_branch=bot/chore/bump-version" \
-d "target_branch=$RELEASE_BRANCH" \
-d "title=chore: Fix version of \`$RELEASE_BRANCH\`" \
-d "description=[🤖]: Hi @okoenig 👋,<br><br>we've adjusted the version number of \`$RELEASE_BRANCH\` for you! 🚀<br><br>Please review and approve this cherry pick by your convenience\!"
publish:upgrade_dependencies:
stage: publish
image: ${CI_MCORE_DEV_IMAGE}:${CI_PIPELINE_ID}
script:
- export GITLAB_ENDPOINT
- export RO_API_TOKEN=${PAT}
- export BRANCH_NAME=ci-bot/build/upgrade-dependencies-$(date +%Y-%m-%d)
- uv lock --upgrade
- git checkout -b $BRANCH_NAME
- git add uv.lock pyproject.toml
- git config --global user.email "mcore-bot@nvidia.com"
- git config --global user.name "Mcore Bot"
- git remote set-url origin "https://gitlab-ci-token:${PAT}@${GITLAB_ENDPOINT}/$CI_PROJECT_NAMESPACE/megatron-lm.git"
- |
git commit -m "chore: Upgrade dependencies"
- git push --force -u origin $BRANCH_NAME
- |
curl \
--header "PRIVATE-TOKEN: $PROJECT_ACCESS_TOKEN_MCORE" \
--url https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests \
-d "source_branch=$BRANCH_NAME" \
-d "target_branch=main" \
-d "title=chore: Upgrade dependencies ($(date +%Y-%m-%d))" \
-d "labels=test::Run functional tests" \
-d "description=[🤖]: Hi @okoenig 👋,<br><br>we've upgraded the dependencies of \`$BRANCH_NAME\` for you! 🚀<br><br>Please review and approve this cherry pick by your convenience\!"
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
rules:
- if: $CI_COMMIT_BRANCH == "ci-upgrade-dependencies" && $PUBLISH == "yes" && $PUBLISH_SCOPE == "upgrade-dependencies"
- when: never
publish:merge_into_dev:
stage: publish
image: ${CI_MCORE_DEV_IMAGE}:${CI_PIPELINE_ID}
script:
- export GITLAB_ENDPOINT
- export RO_API_TOKEN=${PAT}
- |
git config --global user.email "mcore-bot@nvidia.com"
git config --global user.name "Mcore Bot"
- SOURCE_BRANCH=ci/merge-into-dev
- |
set -x
set +e
SOURCE_BRANCH_EXISTS=$([[ "$(git ls-remote --heads origin refs/heads/$SOURCE_BRANCH)" != "" ]] && echo true || echo false)
if [[ "$SOURCE_BRANCH_EXISTS" == "false" ]]; then
git fetch origin dev
git checkout -b $SOURCE_BRANCH origin/dev
else
git fetch origin $SOURCE_BRANCH
git checkout origin/$SOURCE_BRANCH
fi
git fetch origin main
git merge origin/main
CLEAN=$?
set -e
- |
if [[ "$CLEAN" -ne 0 ]]; then
echo "Merge failed"
URL="https://${GITLAB_ENDPOINT}/${CI_PROJECT_PATH}/-/commit/${CI_COMMIT_SHA}"
SHORT_SHA=$(git rev-parse --short HEAD)
MESSAGE='{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "beep boop 🤖: Cherry-picking main (<'$URL'|'${SHORT_SHA}'>) into dev failed.\nPlease merge it manually into '$SOURCE_BRANCH'.\n\ncc '$SLACK_ADMIN_DEV'"
}
}
]
}'
curl -X POST -H "Content-type: application/json" --data "$MESSAGE" ${MCORE_NOTIFICATION_HOOK_DEV}
exit 1
fi
- git push -u origin ci/merge-into-dev
- |
curl \
--header "PRIVATE-TOKEN: $PROJECT_ACCESS_TOKEN_MCORE" \
--url https://${GITLAB_ENDPOINT}/api/v4/projects/${CI_PROJECT_ID}/merge_requests \
-d "source_branch=$SOURCE_BRANCH" \
-d "target_branch=dev" \
-d "title=chore: Merge into dev" \
-d "labels=test::Run functional tests" \
-d "merge_when_pipeline_succeeds=true" \
-d "description=[🤖]: Hi @zijiey 👋,<br><br>merging \`$SOURCE_BRANCH\` into \`dev\` for you! 🚀<br><br>Please review and approve this cherry pick by your convenience\!"
tags:
- arch/amd64
- env/prod
- origin/jet-fleet
- owner/jet-core
- purpose/utility
- team/megatron
rules:
- if: $CI_COMMIT_BRANCH == "main" && $CI_PIPELINE_SOURCE == "push"
allow_failure: true
- when: never
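# --- End of the CI configuration. The block below is the repository's pre-commit configuration
# (presumably .pre-commit-config.yaml), pinning black, pylint, and isort hooks scoped to
# megatron/core (black also covers tests/unit_tests). ---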
repos:
- repo: https://github.com/psf/black
rev: 'refs/tags/24.4.2:refs/tags/24.4.2'
hooks:
- id: black
files: ^megatron/core/.*|^tests/unit_tests/.*
args: ["--skip-magic-trailing-comma", "--skip-string-normalization"]
- repo: https://github.com/pycqa/pylint
rev: v3.2.6
hooks:
- id: pylint
files: ^megatron/core/.*
- repo: https://github.com/pycqa/isort
rev: 5.13.2
hooks:
- id: isort
files: ^megatron/core/.*
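# --- Pylint configuration (presumably .pylintrc): disables everything except the docstring,
# unused-import, line-length, possibly-used-before-assignment, and bad-builtin (print) checks. ---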
[MAIN]
ignore-paths=tests
max-line-length=100
load-plugins=pylint.extensions.bad_builtin
[MESSAGES CONTROL]
disable=all
enable=C0115,C0116,W0611,C0301,E0606,W0141
# C0115: missing-class-docstring
# C0116: missing-function-docstring
# W0611: unused-import
# C0301: line-too-long
# E0606: possibly-used-before-assignment
# W0141: bad-builtin (from bad_builtin extension)
[BASIC]
bad-functions=print
[BAD_BUILTIN]
# Specify which builtins should be flagged
bad-builtins=print
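# --- Pinned Python version (presumably the .python-version file). The changelog follows. ---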
3.12
# Changelog
## NVIDIA Megatron Core 0.15.0
* Features
* Performance
* Fused QKV preprocessing with precomputed RoPE caches (3x preprocessing speedup, 10-14% E2E) ([MR \!3912](https://github.com/NVIDIA/Megatron-LM/commit/f0d9fa97fead9825ae3eada36ee2df568bfa415b))
* Use new TE interface for user buffers ([MR \!3886](https://github.com/NVIDIA/Megatron-LM/commit/d47b83807142b6490c7a000e63d25a479b106fd9))
* Add CPU activation offloading via TE ([MR \!4286](https://github.com/NVIDIA/Megatron-LM/commit/310671436c36e6bd198e92c4f30bc84469cc31d8))
* Add configurable double buffering ([MR \!4026](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4026))
* Add Muon optimizer and distributed optimizer support ([MR \!4106](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4106))
* Add setting to support Adam or AdamW optimizer ([MR \!3866](https://github.com/NVIDIA/Megatron-LM/commit/03fd0b41b3840c6f19558161d98373a9242402e5))
* MoE
* Add DTensor support for EP and DSv3 modules ([MR \!3955](https://github.com/NVIDIA/Megatron-LM/commit/268fda08592528b7bc1a21aadaed259980ca8efb))
* Add HybridEP backend to Flex Dispatcher ([MR \!4237](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4237))
* Support FP8 recomputation for MoE components ([MR \!4030](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4030))
* Implement NVFP4 Zero Padding for MoE ([MR \!4225](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4225))
* Compute shared experts before router ([MR \!4068](https://github.com/NVIDIA/Megatron-LM/commit/e8024d716f3036ebcef8c5254c7830ad09aaf41b))
* Enable bias in expert MLP ([MR \!3858](https://github.com/NVIDIA/Megatron-LM/commit/a329dd6da586261a45a8f7d04c1e659ffedd80ae))
* Model support
* Add YaRN support for GPT-OSS ([MR \!4044](https://github.com/NVIDIA/Megatron-LM/commit/2c1b77a9984bfa978e7cf1f58522e5f8e045d017))
* Add support for Qwen3-Next arguments ([MR \!4070](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4070))
* Add FP8 init for MTP ([MR \!3958](https://github.com/NVIDIA/Megatron-LM/commit/d6c6e54ec5eb43d4e196c7ae84e0e88f28613e6b))
* Add fp8\_dpa option for FP8 scaling ([MR \!4053](https://github.com/NVIDIA/Megatron-LM/commit/61047e60e617e71ebe120ec293b62df6b0efc84f))
* Add RADIO-g support to converter and tester ([MR \!4371](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4371))
* Add audio semantic reasoning data for voice chat and speech instructions ([MR \!4397](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4397))
* FSDP
* Enable joint training of parallel modules ([MR \!3850](https://github.com/NVIDIA/Megatron-LM/commit/53008b844f98886a2144c216ecd25952cb2dda58))
* Add support for multimodule communication ([MR \!4235](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4235))
* Inference
* Add CUDA Graph runner lookup table cache (up to 2x E2E speedup) ([MR \!4082](https://github.com/NVIDIA/Megatron-LM/commit/ab43252fdbedcc3662014ae0e110bd3278d844f4))
* Add MoE dropping and padding router for CUDA Graph \+ decode ([MR \!3816](https://github.com/NVIDIA/Megatron-LM/commit/56818f9e5090ff9eb0f13f10bfe408aae4031c5c))
* Dynamic audio shapes with variable sequence lengths (2.5x throughput improvement) ([MR \!4274](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4274))
* Integrate unified memory for dynamic inference context ([MR \!3985](https://github.com/NVIDIA/Megatron-LM/commit/ef4ae4528a0924159069b9f3a2719616156bafa2))
* Post-training
* Add GPT-OSS ModelOpt support with quantization, import/export ([MR \!4169](https://github.com/NVIDIA/Megatron-LM/commit/a2d8c806b35bc708b13e6c069e19e5dfb49b8481))
* Enable KD support with hybrid training loop ([MR \!4021](https://github.com/NVIDIA/Megatron-LM/commit/48d7275062a8307f82bd0fa6c1504032c7f3af96))
* Add ModelOpt pruning example ([MR \!4022](https://github.com/NVIDIA/Megatron-LM/commit/5a58976ebe007064c2ff5e76e815aa5fcf1a8787))
* RL
* Add importance sampling and partial rollouts to Megatron RL ([MR \!4000](https://github.com/NVIDIA/Megatron-LM/commit/8399280ed3b72a183f44820896a67392c0a47e3e))
* Add sequence packing for RL ([MR \!4191](https://github.com/NVIDIA/Megatron-LM/commit/ee8e9307f3ad655e6a46f98a483d8192995b02c2))
* Ease of use
* Handle CUDA absence during import ([MR \!4120](https://github.com/NVIDIA/Megatron-LM/commit/ae44e49271dc45b51a7400ecf6debc598ba90b54))
* Add granary dataloader functionality ([MR \!4291](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4291))
* Enable SWA mixing with attention ([MR \!3855](https://github.com/NVIDIA/Megatron-LM/commit/e5bc9249d7ad34355f5db4c8ff7d7a9080f94dc2))
* Bug fixes
* Fix convergence bug in MXFP8 parameter gradient buffer reuse ([MR \!3999](https://github.com/NVIDIA/Megatron-LM/commit/c2c36f77cf7a0476daee5bb2dec604c2764de320))
* Fix loss mask cloning to prevent incorrect updates ([MR \!4164](https://github.com/NVIDIA/Megatron-LM/commit/c94d58f3260aa568588265e07b3c06bb58cbde41))
* Fix metadata loss in checkpoints ([MR \!4182](https://github.com/NVIDIA/Megatron-LM/commit/d8c6aa4c0b5d4c15ec1196802bce292d4580ed4a))
* Fix FSDP grad accum fusion support ([MR \!4018](https://github.com/NVIDIA/Megatron-LM/commit/9f72f4775509668173c75eaab5d58a49f4473748))
* Fix non-TE optimizer checkpoint issue ([MR \!3931](https://github.com/NVIDIA/Megatron-LM/commit/2ebb6ee95af8b547e3c0ac394d494cb189b890bc))
* Fix BERT virtual pipeline parallelism ([MR \!3993](https://github.com/NVIDIA/Megatron-LM/commit/18420b63408101fe5a49d125fb29625f1ad6ab26))
* Fix gc.freeze() slowdown by adding gc.collect() on last layer ([MR \!4003](https://github.com/NVIDIA/Megatron-LM/commit/a3f9e566c9595753553a73d403b2a481ad283fc0))
* Fix full iteration CUDA graph non-tensor handling ([MR \!4019](https://github.com/NVIDIA/Megatron-LM/commit/8479eb35fbca9631acb846c3ad5d868e02214227))
* Fix model\_auto\_sync mis-set and add gradient assertion ([MR \!4062](https://github.com/NVIDIA/Megatron-LM/commit/03045f2d880813695f75707e3262a2bfb4206dfe))
* Fix HF import dtype and checkpoint loading issues ([MR \!4095](https://github.com/NVIDIA/Megatron-LM/commit/435e7e0620ff870d99debd73b3c9113226622dde))
* Fix missing initialization in ProcessGroupCollection ([MR \!4159](https://github.com/NVIDIA/Megatron-LM/commit/5f2becf232a85df8687dc539e604e00a6a875da1))
* Fix sink attention TP ([MR \!4173](https://github.com/NVIDIA/Megatron-LM/commit/3b1b9b267193d72d4f8dc710561c2368de8c114c))
* Fix num\_microbatches calculation ([MR \!4199](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4199))
* Fix 1f1b overlap unit tests for MTP standalone ([MR \!4210](https://github.com/NVIDIA/Megatron-LM/commit/44bc753d69cf509c158bb261434498b141fe5130))
* Fix stale state dict handling ([MR \!4226](https://github.com/NVIDIA/Megatron-LM/commit/0ba847081113a92ce01084f33cd4a0c1f31b327b))
* Fix dataset divergence with tokenizer PAD handling ([MR \!4231](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4231))
* Fix parameter initialization ([MR \!4296](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4296))
* Ensure tensor-parallel attributes set regardless of initialization flag ([MR \!4312](https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/merge_requests/4312))
* Known issues
## NVIDIA Megatron Core 0.14.0
* Features
* Inference
* Add async support for DynamicInferenceEngine ([MR \!3187](https://github.com/NVIDIA/Megatron-LM/commit/05079d55a5bfcc7a43f4619e36a40a9e8db3f882))
* Pad input tensors and enable FP8 weights for FP8 inference ([MR \!3341](https://github.com/NVIDIA/Megatron-LM/commit/6a6cd478839d90cf09a837adf8c79cbc844bc920))
* Force inference to always gather logits with tensor parallelism ([MR \!3442](https://github.com/NVIDIA/Megatron-LM/commit/7c9cdcb794089968278c7272e0261a68edf5d369))
* Multi batch size CUDA Graphs for Dynamic Inference ([MR \!3402](https://github.com/NVIDIA/Megatron-LM/commit/30aabe5e3133c6d70aa55aaabad4ea8cb04ce63c))
* Post-training
* ModelOpt updates ([MR \!3268](https://github.com/NVIDIA/Megatron-LM/commit/550ed5243c3a18e39430c15e8918ee63e41d7eaf))
* Add speculative decoding AR validation feature
* Add DeepSeek and Qwen model configs
* Performance
* ModelCommProcessGroup integration ([MR \!3391](https://github.com/NVIDIA/Megatron-LM/commit/26adc2dfde53fbc2b063e2fdd1d9ed26578811a6))
* Add HyperCommGrid: N-Dimensional Communication Grid for Model Parallelism ([MR \!3398](https://github.com/NVIDIA/Megatron-LM/commit/45400df7da7fa23e3aff86804e5ac254d9a8d3c0))
* Flexible creation and management of communication groups
* Add support for Spike No More embedding initializations and weight decay skipping ([MR \!3500](https://github.com/NVIDIA/Megatron-LM/commit/ee74aa66a06b24e511270f285db475941ef63bfd))
* MoE
* We're actively optimizing large-scale fine-grained MoE performance on Blackwell Platform.
* Features:
* Support Expert Parallel A2A Overlapping ([MR \!3470](https://github.com/NVIDIA/Megatron-LM/commit/0c6c1176fb3e3e00534b3591f1ad023d4ecad6fb); [MR \!3074](https://github.com/NVIDIA/Megatron-LM/commit/4b30ec54aba97e16a083eca33d2df1dd48e1b48f))
* Support CP and recompute for MTP ([MR \!3330](https://github.com/NVIDIA/Megatron-LM/commit/650ab87d04105869f197f2ddc441e3b18ca93724))
* Add support for global aux loss ([MR \!3318](https://github.com/NVIDIA/Megatron-LM/commit/e58d9080ea212e005ccba0b6607bfcc86451285d))
* Memory Optimization
* Support recomputation for FP8 layernorm/moe\_act/shared\_experts ([MR \!3465](https://github.com/NVIDIA/Megatron-LM/commit/6850cc6a739d168f8c84db6cdacf4fe2931c0c49))
* Support optimizer offloading for DSV3 FP8 training ([MR \!3659](https://github.com/NVIDIA/Megatron-LM/commit/abbde02f54b62a5194ebe951218e98feceba6d42))
* Performance Optimization
* Add MoE router fusion ([MR \!3809](https://github.com/NVIDIA/Megatron-LM/commit/d93743a9f11d5d17824b8b49868cc90f2904896f))
* Updates for MoE cudagraph ([MR \!3631](https://github.com/NVIDIA/Megatron-LM/commit/95452706d7aa16dc174813e12639a8c8356fbe87))
* Bug fixes:
* Fix router input jitter dtype ([MR \!3774](https://github.com/NVIDIA/Megatron-LM/commit/20b395424d2e2bbfaab57b2f954294eb57c90c82))
* Model support
* Add MiMo video VLM train example ([MR \!3543](https://github.com/NVIDIA/Megatron-LM/commit/786f5629d3462aff2f8855f51db70e882c475116))
* Add AVLM for MIMO ([MR \!3624](https://github.com/NVIDIA/Megatron-LM/commit/db41707430bff743f986b5779712c74242b99caa))
* Ease of use
* Add uv support for source installs ([MR \!3615](https://github.com/NVIDIA/Megatron-LM/commit/164204cd7216e642bdef7299c569d95f02f9a79e))
* Automated weekly prereleases ([MR \!3574](https://github.com/NVIDIA/Megatron-LM/commit/7e59266c70ef34a246438640af690b55c7ecac28))
* Bug fixes
* Use mscale\_all\_dim for softmax\_factor ([MR \!2800](https://github.com/NVIDIA/Megatron-LM/commit/e96a358f60c82b8ac8d965d91c3cc4ad0230a4e0))
* Fix FP8 param blockwise scaling unit test ([MR \!3480](https://github.com/NVIDIA/Megatron-LM/commit/57082f946a04c3390fcfc43634dc546ec3ded033))
* Fix unit test blockwise scaling ([MR \!3491](https://github.com/NVIDIA/Megatron-LM/commit/6d95fe63658f967e56a3fda88a9c30a424fcb520))
* Optimize prefill for token-less requests ([MR \!3499](https://github.com/NVIDIA/Megatron-LM/commit/daaa650a9ac4291d4027ca2fdeb4298ce024efd2))
* Add default values for Fp8Padding and Fp8Unpadding ([MR \!3501](https://github.com/NVIDIA/Megatron-LM/commit/42b2b1d10a9cb699b7e5aa40f6bfba9c2a1348aa))
* Fix CUDA graph logic for flexible pp layout ([MR \!3505](https://github.com/NVIDIA/Megatron-LM/commit/020d85e50ddf0f0282802002acb3662129a519c5))
* Load FP8 models with strict=False ([MR \!3508](https://github.com/NVIDIA/Megatron-LM/commit/1ab876ddc4c1893c76f26d775226a8d1dcdfb3d2))
* Skip rope check for torch \< 1.4.0 ([MR \!3528](https://github.com/NVIDIA/Megatron-LM/commit/d8180ef8ed0bb6f305dcdedf1b27d91304f361a3))
* Disable Apex tests for stability ([MR \!3539](https://github.com/NVIDIA/Megatron-LM/commit/d1256277fe378add0a2cfd7251f5a350b6d126ec))
* Fix typo in parallel\_state expert parallelism ([MR \!3548](https://github.com/NVIDIA/Megatron-LM/commit/5783ff32af759b8102cf0cb0bb82b30c48b9da26))
* Guard modelopt on macOS ([MR \!3549](https://github.com/NVIDIA/Megatron-LM/commit/76144fe1106e4fb0e69aa75b7a6ab66e71e8f37f))
* Retry on CUDA function failure ([MR \!3554](https://github.com/NVIDIA/Megatron-LM/commit/809aab68307a64c1386d68cc78ef70f8f4e12a80))
* Fix NCCL mem pool creation error ([MR \!3557](https://github.com/NVIDIA/Megatron-LM/commit/b61e21153146a563309b5d44cb5d7f7425806072))
* Fix get\_rotary\_seq\_len return type ([MR \!3559](https://github.com/NVIDIA/Megatron-LM/commit/1fa6bc83c7aeae95abc8e86ff0aac596985a01c3))
* Retry on CUDA function failure ([MR \!3560](https://github.com/NVIDIA/Megatron-LM/commit/7da88d74865c3f1a59894173246f26e7b3bf91b9))
* Fix NCCL allocator attribute error ([MR \!3565](https://github.com/NVIDIA/Megatron-LM/commit/6b656114795d74c3353cb007c59af49b1752f447))
* Ensure multi-prompt inference works ([MR \!3568](https://github.com/NVIDIA/Megatron-LM/commit/0fae48931000c9c7af06f7dcf037b5b7d96e0cd6))
* Fix MD5 on FIPS systems ([MR \!3577](https://github.com/NVIDIA/Megatron-LM/commit/83ee8c2848a3b1d42b40086a64da11e19f4b191f))
* Fixes dynamic context and inference bugs ([MR \!3582](https://github.com/NVIDIA/Megatron-LM/commit/e9c1da60a1ccc85376666d58568ed1d3e5a4f9db))
* Fix TE version for interleaved fused RoPE ([MR \!3586](https://github.com/NVIDIA/Megatron-LM/commit/b72b6cc161f5273b545bca09677382917cf20492))
* Fix MTP with MoE and TP logging ([MR \!3594](https://github.com/NVIDIA/Megatron-LM/commit/9af96623b66693e058f6bfce8d0094dc976792d8))
* Guard TE import fix ([MR \!3596](https://github.com/NVIDIA/Megatron-LM/commit/1bf946b1ec3f11e71459c7c0d06a97edbed96a1a))
* Add assertion for NCCL UB case ([MR \!3599](https://github.com/NVIDIA/Megatron-LM/commit/e11d28592f19c122859be764b7afe7c208d9acc1))
* Remove Encoder PP related Functions ([MR \!3604](https://github.com/NVIDIA/Megatron-LM/commit/9e49aa4446a58cc21c4dc0c5d0806551ad075ca7))
* Fix segfaults in tests ([MR \!3605](https://github.com/NVIDIA/Megatron-LM/commit/f6492fe8164fd5b9ad55007d435ccfc66cb98cc7))
* Fix TE error in distributed optimizer ([MR \!3625](https://github.com/NVIDIA/Megatron-LM/commit/e6c510ff3c1159f8955589b26f7c395bdf0607d9))
* Remove redundant barrier in checkpoint flow ([MR \!3626](https://github.com/NVIDIA/Megatron-LM/commit/26869feb6a3ac7f5616cb7253c37a4244d107d70))
* Support VPP MTP, fix logging ([MR \!3630](https://github.com/NVIDIA/Megatron-LM/commit/c351a473c7eedac2c43eab0815afb9759f4f8187))
* Retry mechanism for free(): invalid pointer errors ([MR \!3632](https://github.com/NVIDIA/Megatron-LM/commit/ec35b41b2df145a7ccb84afc48d94e0786e094da))
* Fix test\_replication.py issues ([MR \!3633](https://github.com/NVIDIA/Megatron-LM/commit/f7b50b271b2e0e396069e02551b21aa6fb374b43))
* Fix typo in parallel\_state ([MR \!3634](https://github.com/NVIDIA/Megatron-LM/commit/3c79a2c330290df58804c33e28e7c197fcc1f0b9))
* Fix CUDA graph logic determination ([MR \!3635](https://github.com/NVIDIA/Megatron-LM/commit/90efa3ef8a3c4f9e0f1db9f67ab9348bfa501387))
* Fix TE installation error ([MR \!3636](https://github.com/NVIDIA/Megatron-LM/commit/7e7322c01c9cb8ec254ecd9042700b22b70fe5c8))
* Ensure correct sharding type in local tests ([MR \!3643](https://github.com/NVIDIA/Megatron-LM/commit/946357f8dd7fdc12424b3a66bc999e6c0a02696c))
* Fix cudagraphed backward buffer reuse for last layer ([MR \!3645](https://github.com/NVIDIA/Megatron-LM/commit/ee61cf450d24760952e8995aab045ab6d55b986e))
* Set default for packed\_seq\_params in get\_rotary\_seq\_len ([MR \!3651](https://github.com/NVIDIA/Megatron-LM/commit/510d58c46664f44c556005ac928c5c531e12f761))
* Fix dynamic example script errors ([MR \!3653](https://github.com/NVIDIA/Megatron-LM/commit/72e290bf1f4bbf0c8047bb10a51da6ea6372e163))
* Guard TE import fix ([MR \!3666](https://github.com/NVIDIA/Megatron-LM/commit/ac198fc0d60a8c748597e01ca4c6887d3a7bcf3d))
* Breaking changes:
* `megatron.core.distributed.custom_fsdp` has been refactored into `megatron.core.distributed.fsdp.src.megatron_fsdp` (breaking change)
* Known issues
## NVIDIA Megatron Core 0.13.0
* Support bf16 dtype for optimizer states to use precision-aware optimizer in TransformerEngine
* MoE
* Features:
* Flexible Asymmetric Virtual Pipeline Parallelism with Custom Pipeline Layout (--pipeline-model-parallel-layout)
* Add support to pass custom parallelism groups to MoE modules.
* Add Hybrid Shard Data-Parallel support for MoE models (--num-distributed-optimizer-instances)
* Support EP \+ custom FSDP training for DeepSeek-V3
* FP8 support for Multi-Token-Prediction
* Memory Optimization
* Fine-grained recomputation to reduce activation memory (--recompute-modules with --recompute-granularity selective)
* Memory-efficient token permutation by moving the probs multiplication from unpermutation to the activation function of GroupedMLP.
* Performance Optimization
* MLA RoPE fusion kernel and YARN embedding cache.
* FP8 padding optimization of MoE models by padding the routing map.
* Bug fixes:
* Fix the aux loss calculation when expert\_bias or group limited routing is used. This changes load\_balancing\_loss values compared to the previous version.
* Fix packed sequence support for MLA
* Known Issues:
* MTP is not compatible with flexible pipeline layout, will be fixed at \!3594.
* MTP convergence issue with TP2, will be fixed at \!3594.
## NVIDIA Megatron Core 0.12.0
* Add FP8 recipe selection to arguments (--fp8-recipe, --first-last-layers-bf16, --num-layers-at-start-in-bf16, --num-layers-at-end-in-bf16)
* Context parallel: fix loss scaling when calculate_per_token_loss=True
* Make the number of data parallel communication buckets configurable (--ddp-num-buckets, --ddp-pad-buckets-for-high-nccl-busbw)
* Inference
* Support in-flight batching and chunked KV cache
* Reduce memory usage:
* by not materializing full attention mask
* by only materializing logits for the last token during decode
* by removing an obsolete tensor reference
* Hybrid Model
* Inference
* Add CUDA graph support
* Change tools/run_mamba_text_generation_server.py to use megatron.core.inference
* Fix a shape issue when materializing logits for Mamba model
* Improve initialization of Mamba layers
* Add configuration switches (--mamba-state-dim, --mamba-head-dim, --mamba-num-groups, --is-hybrid-model)
* Make num_floating_point_operations work with hybrid model
* Make hybrid_conversion.py work with mixer that uses TE linear
* Add FP8 support
* Fix Mamba dt_bias tensor parallelism
* Support multimodal tokenizer
* Improve data parallelism scaling
* MoE
* Features:
* DeepEP support, compatible with all the parallelisms and token drop / dropless
* Important precision improvement: Enable FP32/FP64 routing and unpermutation using --moe-router-dtype. FP32 is recommended for all fine-grained MoE training
* CUDA Graph support for MoE
* Multi-Token Prediction (MTP) Support
* Fused indices_to_multihot kernel for DeepEP dispatcher
* Bug fixes:
* Fix Hang Issue with MoE+Dense Hybrid models
* Update theoretical memory and tflops estimation for MoE and MLA
* Fix MoE Aux loss scaling for per token loss
* Fixes for group limited routing and expert bias. We verified these fixes through dsv3 e2e verifications
* Known issues:
* Checkpoints trained with Custom FSDP for MoE may not be compatible with 3D parallel training.
## NVIDIA Megatron Core 0.11.0
* Add multi datacenter training support through N/S connection
* MoE
* Features
* Support DeepSeek-V3 fine-tuning
* Aux-loss-free load balancing strategy
* Node-limited routing and Device-limited routing support.
* Tensor Parallelism support for MLA and Sequence Auxiliary Loss
* MTP (with TP and PP support) is coming soon.
* Permutation / Unpermutation fusion kernel from TransformerEngine.
* Uneven virtual pipeline parallel split support in first and last PP stage.
* Bug fixes:
* Fix the grad scale when TP != expert-TP and average_in_collective is enabled in DDP.
* Fix TEGroupedMLP distckpt compatibility issue with FP8 padding/unpadding.
* Known Issues:
* When training the Dense+MoE hybrid model, the process will hang if any PP rank does not have expert params.
* Add MX-FP16 support for optimizer and master weights
* CUDA Graph memory optimizations
* Enable UCC backend for PP communication
* Optimizer CPU offload support for memory savings
* Models
* Initial RADIO/CRADIO implementation
* llama3.2 support
* Hybrid Model
* Support quantization via TensorRT Model Optimizer
## NVIDIA Megatron Core 0.10.0
* Adding MLA to MCore
* Enable FP8 for GroupedMLP
* MoE Parallel Folding
* Enhance MoE Architecture: Support MoE Layer Frequency Patterns and Configurable MoE FFN Hidden Size
* Multimodal: NVLM training and evaluation support in MCore
* Mamba Hybrid
* Increase performance and reduce memory footprint of Triton language/compiler distributed caching
* Add more unit testing and fix bugs
## NVIDIA Megatron Core 0.9.0
* Uneven pipeline parallelism
* Enable pipeline parallelism where first and last ranks have fewer transformer layers than the intermediate ranks
* Per layer CUDAGraph support for GPT training with Transformer Engine modules
* Enable different TP sizes for the vision encoder
* Enable pipeline parallelism for T5 & Llava models
* Support multi-tile multi-image input in Llava models
* MoE
* FP8 support
* Runtime upcycling support
* Dispatcher implementation optimizations
* Shared expert support with overlapping optimizations
* Qwen Model support
* Known Issues
* When using sequence parallel, during the transformer block forward pass, dropout is not using the appropriate rng context.
* NVRx / Fault tolerance
* fault and hang detection in addition to existing straggler detection
* graceful exit and auto restart
## NVIDIA Megatron Core 0.8.0
* Multimodal
* Added initial support for training vision language models using the LLaVA architecture
* Added initial support for inference with multimodal inputs
* End-to-end multimodal example from data collection to training to evaluation is provided in examples/multimodal
* MoE
* Context Parallel support.
* Distributed checkpoint support for grouped GEMM.
* Mamba
## NVIDIA Megatron Core 0.7.0
* MoE
* Token drop support
* Several efficiency optimizations
* Improved model parallelism
* Memory optimizations
* Distributed checkpointing
* Enabled for Retro
* Asynchronous checkpoint saving
* Several minor bug fixes, speed improvements, and memory optimizations
## NVIDIA Megatron Core 0.6.0
* MoE (Mixture of Experts)
* Performance optimization
* Communication optimization for multi GPU and Single GPU
* 23% improvement (323 TFLOPS/GPU) over MCore 0.5.0 on Mixtral with Hopper BF16
* GroupedMLP enhancement for Hopper
* DP Overlapping. Support overlapping computation with gradient reduction and parameter gathering.
* All-to-All based Token Dispatcher
* Layer-wise logging for load balancing loss.
* Improved expert parallel support including distributed optimizer.
* Distributed optimizer
* RETRO
* Data processing
* BERT
* Distributed checkpointing
* Dist checkpointing
* PyTorch native distributed backend
* Improved saving/loading speed
* TensorRT-LLM Export
* Integration with TensorRT Model Optimizer Post-training quantization (PTQ)
* Text generation driver to perform PTQ in Megatron-LM
* Llama2 and Nemotron3-8b examples use the TensorRT-LLM unified build API to build the engine after training.
* Several minor enhancements, bug fixes, and documentation updates
## NVIDIA Megatron Core 0.5.0
### Key Features and Enhancements
Megatron core documentation is now [live!](https://docs.nvidia.com/megatron-core/developer-guide/latest/user-guide/index.html#quick-start)
### Model Features
* MoE (Mixture of Experts)
* Support for Z-loss, Load balancing and Sinkhorn
* Layer and communications refactor
* Richer parallelism mappings and EP can be combined with other model parallel techniques for larger MoE variants, e.g. EP + TP + DP + SP + PP
* Token dropless architecture with Top-K routing
* Performance optimization with GroupedGEMM when number of local experts is > 1
* Distributed checkpointing
* Interleaved rotary embedding
### Datasets
* Masked WordPiece datasets for BERT and T5
* Raw and mock datasets
### Parallelism
### Performance
* Activation offloading to CPU
* Rope and Swiglu fusion
* Sliding window attention (via Transformer Engine)
### General Improvements
* Timers
## NVIDIA Megatron Core 0.4.0
### Key Features and Enhancements
#### Models
* BERT
* RETRO
* T5
#### Parallelism
* Mixture of Experts support for GPT
* Model parallel efficient Distributed Data Parallel (DDP)
* Context Parallel (2D Tensor Parallel) support
#### Datasets
* GPT Dataset
* Blended Dataset
# Core
[Core-ADLR] @mcore-reviewers/core-adlr
megatron/core/
[Core-NeMo] @mcore-reviewers/core-nemo
megatron/core/
^[Core-MLPerf] @mcore-reviewers/mlperf
megatron/core/
[GPT] @mcore-reviewers/gpt
megatron/core/models/gpt/
[Multimodal] @mcore-reviewers/multi-modal
megatron/core/models/multimodal/
[Hybrid-mamba] @mcore-reviewers/hybrid-mamba
megatron/core/models/mamba/
# Distributed Checkpointing
[Distributed Checkpointing] @mcore-reviewers/dist-checkpointing
megatron/core/dist_checkpointing/
# Distributed Optimizer
[Distributed Optimizer] @mcore-reviewers/dist-optimizer
megatron/core/optimizer/distrib_optimizer/
# Quantization and Inference (QAT)
[Quantization and Inference (QAT)] @mcore-reviewers/quantization-and-inference
megatron/core/inference/modelopt_support
# Datasets
[Datasets] @mcore-reviewers/datasets
megatron/core/datasets/
# Parallelism
[Pipeline Parallelism] @mcore-reviewers/pipeline-parallelism
megatron/core/pipeline_parallel/
# Transformer
[Transformer] @mcore-reviewers/core-adlr @mcore-reviewers/core-nemo
megatron/core/transformer/
[MoE-ADLR] @mcore-reviewers/moe-adlr
megatron/core/transformer/moe/
[MoE-Moe] @mcore-reviewers/moe-moe
megatron/core/transformer/moe/
# Inference
[Inference] @mcore-reviewers/inference
megatron/core/inference/
# Parallel State
[ParallelState] @mcore-reviewers/core-adlr @mcore-reviewers/core-nemo
megatron/core/parallel_state.py
[Post-Training] @mcore-reviewers/post-training
megatron/core/post_training/
megatron/post_training
[CI][1] @mcore-reviewers/ci
.gitlab/
.github/
.gitlab-ci.yml
docker/
tests/unit_tests/run_ci_test.sh
tests/test_utils/python_scripts/
tests/functional_tests/python_test_utils/
tests/functional_tests/shell_test_utils/
megatron/core/transformer/transformer_block.py
megatron/core/transformer/transformer_layer.py
^[Tests][1] @mcore-reviewers/ci
tests/functional_tests/test_cases/
tests/functional_tests/recipes/
tests/unit_tests/
[RL] @mcore-reviewers/rl
megatron/rl/
examples/rl/
tests/unit_tests/test_rl_utils.py
train_rl.py
# Contributing to Megatron-LM
This document outlines the processes and policies for issues and pull requests by non-NVIDIA contributors to the Megatron-LM GitHub repository.
Everyone is welcome to contribute to the project, but development of Megatron-LM continues internally at NVIDIA. When contributing, it is important to ensure that changes are in line with the project direction. Small changes to fix bugs are welcomed and appreciated. If proposing large architectural changes or changes for stylistic reasons, open an issue first so we can discuss them.
PRs will first be pulled into NVIDIA's internal Megatron-LM repo and then pushed back out to the open GitHub repo with proper credit given to the committers.
## Issue policy
Please do file any bugs you find, keeping the following in mind:
- If filing a bug, i.e. you have found something that doesn't work as expected, use the BUG template.
- If you've found a regression in speed or accuracy use the REGRESSION template.
- If you are requesting a new feature or modification of an existing feature use the ENHANCEMENT template.
- If opening an issue to ask a question no template is needed but please make your question as clear and concise as possible.
- One issue per bug. Putting multiple things in the same issue makes both discussion and completion unnecessarily complicated.
- Your bug is most likely to get attention from the development team quickly if we can easily reproduce it.
- Use proper spelling, grammar, and punctuation.
- Write in an authoritative and technical tone.
## Code submission policy
Here are some dos & don'ts to try and stick to:
### Do:
- Format new code in a style that is consistent with the file being changed. Megatron-LM doesn't (yet) have a style guide or enforced formatting.
- Split your changes into separate, atomic commits, i.e. a commit per feature or fix.
- Make sure your commits are rebased on the master branch.
- Write the commit message subject line in the imperative mood ("Change the default argument for X", not "Changed the default argument for X").
- Write your commit messages in proper English, with care and punctuation.
- Check the spelling of your code, comments and commit messages.
### Don't:
- Submit code that's incompatible with the project license.
- Touch anything outside the stated scope of the PR. This includes formatting changes to code not relevant to the PR.
- Iterate excessively on your design across multiple commits.
- Include commented-out code.
- Attempt large architectural changes without first opening an issue to discuss.
## Issue and Pull Request Q&A (Updated Jul 2023)
### I've submitted an issue and PR. When can I expect to get some feedback?
Megatron-LM is developed and maintained by a small team of researchers. We will endeavour to read and acknowledge all new issues and PRs within a week. A few rules of thumb:
- Reproducible bugs/regressions and bug/regression fixes are likely to get the attention of maintainers the quickest.
- Issues requesting an enhancement may only receive acknowledgement that they've been read and may be closed with a "wontfix" label if they're not in line with the project direction. If they are acknowledged and remain open you can assume the maintainers agree they're a desirable feature.
- Support requests, i.e. requests for help running the code, have the lowest priority and will be responded to as maintainer time permits.
### If my issue or PR isn't getting attention, how long should I wait before pinging one of the project maintainers?
One week if there is no acknowledgement of the initial request.
### Who are the project maintainers I should ping?
The corresponding maintainers at this time are @jaredcasper and @jon-barker.
### Is there a policy for issues and PRs that haven't been touched in X days? Should they be closed?
Yes, starting in July 2023 we have a bot that will mark untouched PRs as "stale" after 60 days.
We have a long backlog of issues and PRs dating back 3.5 years. We are trying to triage these now by working backwards. Older issues we believe may still be relevant may receive a request to re-test them with the latest code. If there's no response they may be closed. Again, if you believe they should be re-opened, just respond with a comment to that effect.
Thank-you!
The following applies to all files unless otherwise noted:
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--
This repository also contains code from Hugging Face Inc., Google Research,
Facebook (from their Fairseq, Dino, and ParlAI projects), Microsoft (from their
Swin-Transformer project), Philip Popien, the Mamba project (Tri Dao and
Albert Gu), and the Triton language and compiler project (Philippe Tillet and
OpenAI). Files from these organizations have notices at the top of each file.
Below are licenses used in those files, as indicated.
--------------------------------------------------------------------------------------
-- LICENSE FOR Facebook, huggingface, Google Research, LLaVA, Mamba, TinyZero and vLLM code --
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
LICENSE FOR
Facebook, Inc. and its affiliates,
Meta Platforms, Inc. and its affiliates,
Microsoft Corporation,
OpenGVLab/InternVL,
Triton language and compiler,
and DeepSeek.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
include megatron/core/requirements.txt
include megatron/core/README.md
include megatron/core/package_info.py
recursive-include requirements *
<div align="center">
Megatron-LM & Megatron Core
===========================
<h4>GPU-optimized library for training transformer models at scale</h4>
[![Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](https://docs.nvidia.com/Megatron-Core/developer-guide/latest/index.html)
[![version](https://img.shields.io/badge/release-0.12.0-green)](./CHANGELOG.md)
[![license](https://img.shields.io/badge/license-Apache-blue)](./LICENSE)
<div align="left">
## ⚡ Quick Start
```bash
# 1. Install Megatron Core with required dependencies
pip install --no-build-isolation megatron-core[mlm,dev]
# 2. Clone repository for examples
git clone https://github.com/NVIDIA/Megatron-LM.git
cd Megatron-LM
pip install --no-build-isolation .[mlm,dev]
```
**→ [Complete Installation Guide](#installation)** - Docker, pip variants (dev, lts, etc.), source installation, and system requirements
# Latest News
- 📣 NEW! **[Megatron Dev Branch](https://github.com/NVIDIA/Megatron-LM/tree/dev)** - early access branch with experimental features.
- 🔄 **[Megatron Bridge](https://github.com/NVIDIA-NeMo/Megatron-Bridge)** - Bidirectional converter for interoperability between Hugging Face and Megatron checkpoints, featuring production-ready recipes for popular models.
- **[2025/08]** **[MoE Q3-Q4 2025 Roadmap](https://github.com/NVIDIA/Megatron-LM/issues/1729)** - Comprehensive roadmap for MoE features including DeepSeek-V3, Qwen3, advanced parallelism strategies, FP8 optimizations, and Blackwell performance enhancements.
- **[2025/08]** **[GPT-OSS Model](https://github.com/NVIDIA/Megatron-LM/issues/1739)** - Advanced features including YaRN RoPE scaling, attention sinks, and custom activation functions are being integrated into Megatron Core.
- **[2025/06]** **[Megatron MoE Model Zoo](https://github.com/yanring/Megatron-MoE-ModelZoo)** - Best practices and optimized configurations for training DeepSeek-V3, Mixtral, and Qwen3 MoE models with performance benchmarking and checkpoint conversion tools.
- **[2025/05]** Megatron Core v0.11.0 brings new capabilities for multi-data center LLM training ([blog](https://developer.nvidia.com/blog/turbocharge-llm-training-across-long-haul-data-center-networks-with-nvidia-nemo-framework/)).
<details>
<summary>Previous News</summary>
- **[2024/07]** Megatron Core v0.7 improves scalability and training resiliency and adds support for multimodal training ([blog](https://developer.nvidia.com/blog/train-generative-ai-models-more-efficiently-with-new-nvidia-Megatron-Core-functionalities/)).
- **[2024/06]** Megatron Core added support for Mamba-based models. Check out our paper [An Empirical Study of Mamba-based Language Models](https://arxiv.org/pdf/2406.07887) and [code example](https://github.com/NVIDIA/Megatron-LM/tree/ssm/examples/mamba).
- **[2024/01 Announcement]** NVIDIA has released the core capabilities in **Megatron-LM** into [**Megatron Core**](https://github.com/NVIDIA/Megatron-LM/tree/main/megatron/core) in this repository. Megatron Core expands upon Megatron-LM's GPU-optimized techniques with more cutting-edge innovations on system-level optimizations, featuring composable and modular APIs. Explore the [Megatron Core intro](#megatron-core-composable-library) for more details.
</details>
<details>
<summary>Table of Contents</summary>
**Getting Started**
- [Quick Start](#-quick-start)
- [Latest News](#latest-news)
- [Megatron Overview](#megatron-overview)
- [Project Structure](#project-structure)
- [Megatron-LM: Reference Implementation](#megatron-lm-reference-implementation)
- [Megatron Core: Composable Library](#megatron-core-composable-library)
- [Installation](#installation)
- [Docker (Recommended)](#-docker-recommended)
- [Pip Installation](#-pip-installation)
- [Source Installation](#-source-installation)
- [System Requirements](#system-requirements)
**Core Features**
- [Performance Benchmarking](#performance-benchmarking)
- [Weak Scaling Results](#weak-scaling-results)
- [Strong Scaling Results](#strong-scaling-results)
- [Ecosystem Libraries](#ecosystem-libraries)
**Training**
- [Training](#training)
- [Getting Started](#getting-started)
- [Data Preparation](#data-preparation)
- [Parallelism Strategies](#parallelism-strategies)
- [Data Parallelism (DP)](#data-parallelism-dp)
- [Tensor Parallelism (TP)](#tensor-parallelism-tp)
- [Pipeline Parallelism (PP)](#pipeline-parallelism-pp)
- [Context Parallelism (CP)](#context-parallelism-cp)
- [Expert Parallelism (EP)](#expert-parallelism-ep)
- [Parallelism Selection Guide](#parallelism-selection-guide)
- [Performance Optimizations](#performance-optimizations)
**Resources**
- [Examples](./examples/) - Training scripts and tutorials
- [Documentation](https://docs.nvidia.com/Megatron-Core/) - Official docs
- [Roadmaps](#roadmaps) - Development roadmaps and feature tracking
- [Community & Support](#-community--support) - Get help and contribute
- [Getting Help](#getting-help)
- [Contributing](#contributing)
- [Citation](#citation)
</details>
# Megatron Overview
## Project Structure
```
Megatron-LM/
├── megatron/
│ ├── core/ # Megatron Core (kernels, parallelism, building blocks)
│ │ ├── models/ # Transformer models
│ │ ├── transformer/ # Transformer building blocks
│ │ ├── tensor_parallel/ # Tensor parallelism
│ │ ├── pipeline_parallel/ # Pipeline parallelism
│ │ ├── distributed/ # Distributed training (FSDP, DDP)
│ │ ├── optimizer/ # Optimizers
│ │ ├── datasets/ # Dataset loaders
│ │ ├── inference/ # Inference engines
│ │ └── export/ # Model export (e.g. TensorRT-LLM)
│ ├── training/ # Training scripts
│ ├── inference/ # Inference server
│ ├── legacy/ # Legacy components
│ └── post_training/ # Post-training (RLHF, etc.)
├── examples/ # Ready-to-use training examples
├── tools/ # Utility tools
├── tests/ # Comprehensive test suite
└── docs/ # Documentation
```
### Megatron-LM: Reference Implementation
**Reference implementation** that includes Megatron Core plus everything needed to train models.
**Best for:**
- **Training state-of-the-art foundation models** at scale with cutting-edge performance on latest NVIDIA hardware
- **Research teams** exploring new architectures and training techniques
- **Learning distributed training** concepts and best practices
- **Quick experimentation** with proven model configurations
**What you get:**
- Pre-configured training scripts for GPT, LLama, DeepSeek, Qwen, and more.
- End-to-end examples from data prep to evaluation
- Research-focused tools and utilities
### Megatron Core: Composable Library
**Composable library** with GPU-optimized building blocks for custom training frameworks.
**Best for:**
- **Framework developers** building on top of modular and optimized components
- **Research teams** needing custom training loops, optimizers, or data pipelines
- **ML engineers** requiring fault-tolerant training pipelines
**What you get:**
- Composable transformer building blocks (attention, MLP, etc.)
- Advanced parallelism strategies (TP, PP, DP, EP, CP)
- Pipeline schedules and distributed optimizers
- Mixed precision support (FP16, BF16, FP8)
- GPU-optimized kernels and memory management
- High-performance dataloaders and dataset utilities
- Model architectures (LLaMA, Qwen, GPT, Mixtral, Mamba, etc.)
## Ecosystem Libraries
**Libraries used by Megatron Core:**
- **[Megatron Energon](https://github.com/NVIDIA/Megatron-Energon)** 📣 **NEW!** - Multi-modal data loader (text, images, video, audio) with distributed loading and dataset blending
- **[Transformer Engine](https://github.com/NVIDIA/TransformerEngine)** - Optimized kernels and FP8 mixed precision support
- **[Resiliency Extension (NVRx)](https://github.com/NVIDIA/nvidia-resiliency-ext)** - Fault tolerant training with failure detection and recovery
**Libraries using Megatron Core:**
- **[Megatron Bridge](https://github.com/NVIDIA-NeMo/Megatron-Bridge)** - Training library with bidirectional Hugging Face ↔ Megatron checkpoint conversion, flexible training loops, and production-ready recipes
- **[NeMo RL](https://github.com/NVIDIA-NeMo/RL)** - Scalable toolkit for efficient reinforcement learning with RLHF, DPO, and other post-training methods
- **[NeMo Framework](https://docs.nvidia.com/nemo-framework/user-guide/latest/overview.html)** - Enterprise framework with cloud-native support and end-to-end examples
- **[TensorRT Model Optimizer (ModelOpt)](https://github.com/NVIDIA/TensorRT-Model-Optimizer)** - Model optimization toolkit for quantization, pruning, and distillation
**Compatible with:** [Hugging Face Accelerate](https://github.com/huggingface/accelerate), [Colossal-AI](https://github.com/hpcaitech/ColossalAI), [DeepSpeed](https://github.com/microsoft/DeepSpeed)
# Installation
## 🐳 Docker (Recommended)
We strongly recommend using the previous release of the [PyTorch NGC Container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) rather than the latest one, for optimal compatibility with Megatron Core releases and testing. Our releases are always based on the previous month's NGC container, which ensures compatibility and stability.
**Note:** The NGC PyTorch container constrains the Python environment globally via `PIP_CONSTRAINT`. In the following examples we unset this variable.
This container comes with all dependencies pre-installed with compatible versions and optimized configurations for NVIDIA GPUs:
- PyTorch (latest stable version)
- CUDA, cuDNN, NCCL (latest stable versions)
- Support for FP8 on NVIDIA Hopper, Ada, and Blackwell GPUs
- For best performance, use NVIDIA Turing GPU architecture or later
```bash
# Run container with mounted directories
docker run --runtime=nvidia --gpus all -it --rm \
-v /path/to/megatron:/workspace/megatron \
-v /path/to/dataset:/workspace/dataset \
-v /path/to/checkpoints:/workspace/checkpoints \
-e PIP_CONSTRAINT= \
nvcr.io/nvidia/pytorch:25.04-py3
```
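Once inside the container, a quick sanity check confirms that the GPUs are visible and that the bundled PyTorch build can use them (a minimal sketch; adjust paths and image tag to your setup):

```bash
# Inside the running container: verify that the driver and GPUs are visible
nvidia-smi

# Verify that the bundled PyTorch build can see the GPUs
python -c "import torch; print(torch.__version__, torch.cuda.is_available(), torch.cuda.device_count())"
```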
## Pip Installation
Megatron Core offers support for two NGC PyTorch containers:
- `dev`: Moving head that supports the most recent upstream dependencies
- `lts`: Long-term support of NGC PyTorch 24.01
Both containers can be combined with `mlm` which adds package dependencies for Megatron-LM on top of Megatron Core.
```bash
# Install the latest release dependencies
pip install "setuptools<80.0.0,>=77.0.0" "packaging>=24.2"
pip install --no-build-isolation megatron-core[dev]
# For running an M-LM application:
pip install "setuptools<80.0.0,>=77.0.0" "packaging>=24.2"
pip install --no-build-isolation megatron-core[mlm,dev]
```
```bash
# Install packages for LTS support NGC PyTorch 24.01
pip install "setuptools<80.0.0,>=77.0.0" "packaging>=24.2"
pip install --no-build-isolation megatron-core[lts]
# For running an M-LM application:
pip install "setuptools<80.0.0,>=77.0.0" "packaging>=24.2"
pip install --no-build-isolation megatron-core[mlm,lts]
```
For a version of Megatron Core with only torch, run:
```bash
pip install megatron-core
```
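After any of the installs above, a short import check verifies the environment (a minimal sketch; whether the version string from `megatron/core/package_info.py` is re-exported as `__version__` is an assumption, hence the guarded lookup):

```bash
# Sanity-check that Megatron Core is importable and report its version if exposed
python -c "import megatron.core as mc; print(getattr(mc, '__version__', 'megatron.core imported'))"
```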
## System Requirements
### Hardware Requirements
- **FP8 Support**: NVIDIA Hopper, Ada, Blackwell GPUs
- **Recommended**: NVIDIA Turing architecture or later
### Software Requirements
- **CUDA/cuDNN/NCCL**: Latest stable versions
- **PyTorch**: Latest stable version
- **Transformer Engine**: Latest stable version
- **Python**: 3.12 recommended
# Performance Benchmarking
For our latest performance benchmarking results, please refer to [NVIDIA NeMo Framework Performance Summary](https://docs.nvidia.com/nemo-framework/user-guide/latest/performance/performance_summary.html).
Our codebase efficiently trains models from 2B to 462B parameters across thousands of GPUs, achieving up to **47% Model FLOP Utilization (MFU)** on H100 clusters.
![Model table](images/model_table.png)
**Benchmark Configuration:**
- **Vocabulary size**: 131,072 tokens
- **Sequence length**: 4096 tokens
- **Model scaling**: Varied hidden size, attention heads, and layers to achieve target parameter counts
- **Communication optimizations**: Fine-grained overlapping with DP (`--overlap-grad-reduce`, `--overlap-param-gather`), TP (`--tp-comm-overlap`), and PP (enabled by default)
**Key Results:**
- **6144 H100 GPUs**: Successfully benchmarked 462B parameter model training
- **Superlinear scaling**: MFU increases from 41% to 47-48% with model size
- **End-to-end measurement**: Throughputs include all operations (data loading, optimizer steps, communication, logging)
- **Production ready**: Full training pipeline with checkpointing and fault tolerance
- *Note: Performance results measured without training to convergence*
## Weak Scaling Results
Our weak-scaling results show superlinear scaling (MFU increases from 41% for the smallest model considered to 47-48% for the largest models); this is because larger GEMMs have higher arithmetic intensity and are consequently more efficient to execute.
![Weak scaling](images/weak_scaling.png)
## Strong Scaling Results
We also strong scaled the standard GPT-3 model (our version has slightly more than 175 billion parameters due to larger vocabulary size) from 96 H100 GPUs to 4608 GPUs, using the same batch size of 1152 sequences throughout. Communication becomes more exposed at larger scale, leading to a reduction in MFU from 47% to 42%.
![Strong scaling](images/strong_scaling.png)
# Training
## Getting Started
### Simple Training Example
```bash
# Distributed training example (2 GPUs, mock data)
torchrun --nproc_per_node=2 examples/run_simple_mcore_train_loop.py
```
### LLama-3 Training Example
```bash
# 8 GPUs, FP8 precision, mock data
./examples/llama/train_llama3_8b_fp8.sh
```
## Data Preparation
### JSONL Data Format
```json
{"text": "Your training text here..."}
{"text": "Another training sample..."}
```
### Basic Preprocessing
```bash
python tools/preprocess_data.py \
--input data.jsonl \
--output-prefix processed_data \
--tokenizer-type HuggingFaceTokenizer \
--tokenizer-model /path/to/tokenizer.model \
--workers 8 \
--append-eod
```
### Key Arguments
- `--input`: Path to input JSON/JSONL file
- `--output-prefix`: Prefix for output binary files (.bin and .idx)
- `--tokenizer-type`: Tokenizer type (`HuggingFaceTokenizer`, `GPT2BPETokenizer`, etc.)
- `--tokenizer-model`: Path to tokenizer model file
- `--workers`: Number of parallel workers for processing
- `--append-eod`: Add end-of-document token
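The preprocessed output is then referenced by its prefix at training time. A minimal sketch (assuming the default `text` JSON key, which produces files named `processed_data_text_document.bin`/`.idx`):

```bash
# Point training at the preprocessed dataset by prefix (omit the .bin/.idx extension)
--data-path processed_data_text_document \
--tokenizer-type HuggingFaceTokenizer \
--tokenizer-model /path/to/tokenizer.model
```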
<!-- **→ [Complete Data Preparation Guide](./docs/data-preparation.md)** - Comprehensive guide covering advanced preprocessing, dataset collection, deduplication, and optimization strategies -->
# Parallelism Strategies
## Data Parallelism (DP)
### Standard Data Parallel
```bash
# Standard DDP - replicate model on each GPU
torchrun --nproc_per_node=8 pretrain_gpt.py \
--data-parallel-sharding-strategy no_shard
```
### Fully Sharded Data Parallel (FSDP)
```bash
# Megatron's optimized FSDP (~15% faster than PyTorch FSDP2)
--use-custom-fsdp
# PyTorch FSDP2
--use-torch-fsdp2
# Sharding strategies
--data-parallel-sharding-strategy optim # Shard optimizer states (ZeRO-1)
--data-parallel-sharding-strategy optim_grads # Shard gradients + optimizer (ZeRO-2)
--data-parallel-sharding-strategy optim_grads_params # Shard parameters + gradients + optimizer (ZeRO-3)
```
## Tensor Parallelism (TP)
Split individual model layers across GPUs:
```bash
--tensor-model-parallel-size 4 # 4-way tensor parallelism
--sequence-parallel # Enable sequence parallelism (recommended with TP)
```
## Pipeline Parallelism (PP)
Split model depth across GPUs:
```bash
--pipeline-model-parallel-size 8 # 8 pipeline stages
--virtual-pipeline-model-parallel-size 4 # Virtual pipeline for better load balancing
```
## Context Parallelism (CP)
Split long sequences across GPUs for handling long contexts:
```bash
--context-parallel-size 2 # 2-way context parallelism
--cp-comm-type p2p # Communication: p2p, a2a, allgather, a2a+p2p
--hierarchical-context-parallel-sizes 2 4 # Hierarchical context parallelism
```
## Expert Parallelism (EP)
For Mixture of Experts (MoE) models:
```bash
--expert-model-parallel-size 4 # 4-way expert parallelism
--num-experts 8 # 8 experts per MoE layer
--moe-grouped-gemm # Optimize expert computation
```
## Combining Parallelism Strategies
### Parallelism Selection Guide
Based on [NVIDIA NeMo production configurations](https://github.com/NVIDIA/NeMo/tree/main/scripts/performance/recommended_model_configs):
| Model | Size | GPUs | TP | PP | CP | EP | Notes |
|-------|------|------|----|----|----|----|-------|
| **LLama-3** | 8B | 8 | 1 | 1 | 2 | 1 | CP for long seqlen (8K) |
| **LLama-3** | 70B | 64 | 4 | 4 | 2 | 1 | TP+PP |
| **LLama-3.1** | 405B | 1024 | 8 | 8 | 2 | 1 | 3D parallelism for scale |
| **GPT-3** | 175B | 128-512 | 4 | 8 | 1 | 1 | Large model config |
| **Mixtral** | 8x7B | 64 | 1 | 4 | 1 | 8 | EP for MoE |
| **Mixtral** | 8x22B | 256 | 4 | 4 | 8 | 8 | Combined TP+EP for large MoE |
| **DeepSeek-V3** | 671B | 1024 | 2 | 16 | 1 | 64 | Large MoE config |
### MoE-Specific Requirements
**Important**: When combining Expert Parallelism (EP) with Tensor Parallelism (TP), **Sequence Parallelism (SP) must be enabled**.
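As an illustration of how these flags compose (values are examples only, following the per-strategy snippets above), a MoE run that combines TP and EP must also enable SP:

```bash
# Combined TP + EP for a MoE model; SP is mandatory whenever EP and TP are combined
--tensor-model-parallel-size 4 \
--sequence-parallel \
--expert-model-parallel-size 8 \
--num-experts 8 \
--moe-grouped-gemm
```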
## Performance Optimizations
| Feature | Flag | Benefit |
|---------|------|---------|
| **FlashAttention** | `--attention-backend` | Faster attention and lower memory usage |
| **FP8 Training** | `--fp8-hybrid` | Faster training |
| **Activation Checkpointing** | `--recompute-activations` | Reduced memory usage |
| **Data Parallelism Communication Overlap** | `--overlap-grad-reduce` | Faster distributed training |
| **Distributed Optimizer** | `--use-distributed-optimizer` | Reduced checkpointing time |
**→ [NVIDIA NeMo Framework Performance Tuning Guide](https://docs.nvidia.com/nemo-framework/user-guide/latest/performance/performance-guide.html#performance-tuning-guide)** - Comprehensive performance optimization guide covering advanced tuning techniques, communication overlaps, memory optimizations, and profiling options.
### FlashAttention
[FlashAttention](https://github.com/Dao-AILab/flash-attention) is a fast and memory-efficient attention algorithm. We recommend the default usage, which uses cuDNN for attention via Transformer Engine and provides up to 50% speedups on forward and 84% on backward propagation with FP8 kernels. The `flash-attn` package is also supported via `--use-flash-attn`.
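To switch away from the default, the flag mentioned above is sufficient; selecting a backend explicitly via `--attention-backend` is also possible (a sketch; the value name is an assumption, check `--help` for the supported set):

```bash
# Default: cuDNN attention via Transformer Engine (no flag needed)

# Use the flash-attn package instead
--use-flash-attn

# Or select a backend explicitly (value name assumed)
--attention-backend flash
```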
### Mixed Precision Training
```bash
--fp16 # Standard FP16
--bf16 # BFloat16 (recommended for large models)
--fp8-hybrid # FP8 training (Hopper, Ada, and Blackwell GPUs)
```
### Activation Checkpointing and Recomputation
```bash
# For limited memory
--recompute-activations
# For extreme memory constraints
--recompute-granularity full \
--recompute-method uniform
```
### Data Parallelism Communication Overlap
```bash
--overlap-grad-reduce
--overlap-param-gather
```
### Distributed Optimizer
```bash
--use-distributed-optimizer
```
# Roadmaps
Stay up-to-date with our development roadmaps and planned features:
- **[MoE Q3-Q4 2025 Roadmap](https://github.com/NVIDIA/Megatron-LM/issues/1729)** - Comprehensive MoE feature development including DeepSeek-V3, Qwen3, advanced parallelism, FP8 optimizations, and Blackwell enhancements
- **[GPT-OSS Implementation Tracker](https://github.com/NVIDIA/Megatron-LM/issues/1739)** - Advanced features including YaRN RoPE scaling, attention sinks, and custom activation functions
*More roadmap trackers will be added soon.*
# Community & Support
## Getting Help
- 📖 **[Documentation](https://docs.nvidia.com/Megatron-Core/)** - Official documentation
- 🐛 **[Issues](https://github.com/NVIDIA/Megatron-LM/issues)** - Bug reports and feature requests
## Contributing
We ❤️ contributions! Ways to contribute:
- 🐛 **Report bugs** - Help us improve reliability
- 💡 **Suggest features** - Shape the future of Megatron Core
- 📝 **Improve docs** - Make Megatron Core more accessible
- 🔧 **Submit PRs** - Contribute code improvements
**→ [Contributing Guide](./CONTRIBUTING.md)**
## Citation
```bibtex
@article{megatron-lm,
title={Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism},
author={Shoeybi, Mohammad and Patwary, Mostofa and Puri, Raul and LeGresley, Patrick and Casper, Jared and Catanzaro, Bryan},
journal={arXiv preprint arXiv:1909.08053},
year={2019}
}
```
# syntax=docker/dockerfile:1.3-labs
ARG FROM_IMAGE_NAME
ARG WHEEL_DIR=/workspace/wheels
FROM ${FROM_IMAGE_NAME} as main
ENV PIP_CONSTRAINT=""
ENV DEBIAN_FRONTEND=noninteractive
ARG UV_VERSION=0.7.2
ARG YQ_VERSION=4.44.1
ENV PATH="/root/.local/bin:$PATH"
ARG UV_PROJECT_ENVIRONMENT=/opt/venv
ENV UV_PROJECT_ENVIRONMENT=${UV_PROJECT_ENVIRONMENT}
ENV VIRTUAL_ENV=$UV_PROJECT_ENVIRONMENT
ENV PATH="$UV_PROJECT_ENVIRONMENT/bin:$PATH"
ENV UV_LINK_MODE=copy
RUN bash -ex <<"EOF"
apt-get update
apt-get install -y --no-install-recommends gettext python3-venv psmisc
apt-get clean
python -m venv /opt/jet
wget https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_linux_amd64 -O /usr/local/bin/yq
chmod a+x /usr/local/bin/yq
curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh
EOF
ARG WHEEL_DIR
COPY README.md pyproject.toml uv.lock /workspace/
COPY megatron/core/__init__.py /workspace/megatron/core/
COPY megatron/core/package_info.py /workspace/megatron/core/
RUN --mount=type=cache,target=/root/.cache/uv \
bash -ex <<"EOF"
export NVTE_CUDA_ARCHS="80;90;100"
uv venv ${UV_PROJECT_ENVIRONMENT} --system-site-packages
uv sync --only-group build
uv sync --extra dev --extra mlm --link-mode copy --locked \
--no-install-package torch \
--no-install-package torchvision \
--no-install-package triton \
--no-install-package nvidia-cublas-cu12 \
--no-install-package nvidia-cuda-cupti-cu12 \
--no-install-package nvidia-cuda-nvrtc-cu12 \
--no-install-package nvidia-cuda-runtime-cu12 \
--no-install-package nvidia-cudnn-cu12 \
--no-install-package nvidia-cufft-cu12 \
--no-install-package nvidia-cufile-cu12 \
--no-install-package nvidia-curand-cu12 \
--no-install-package nvidia-cusolver-cu12 \
--no-install-package nvidia-cusparse-cu12 \
--no-install-package nvidia-cusparselt-cu12 \
--no-install-package nvidia-nccl-cu12
EOF
# Install DeepEP
COPY docker/patches/deepep.patch /workspace/deepep.patch
RUN bash -ex <<"EOF"
cd /workspace
uv pip install nvidia-nvshmem-cu13
pushd /opt/venv/lib/python3.12/site-packages/nvidia/nvshmem/lib/
ln -s libnvshmem_host.so.3 libnvshmem_host.so
popd
git clone --branch v1.2.1 https://github.com/deepseek-ai/DeepEP.git
pushd DeepEP
patch -p1 < /workspace/deepep.patch
popd
TORCH_CUDA_ARCH_LIST="9.0 10.0 12.0" uv pip install --no-build-isolation -v DeepEP/.
rm -rf DeepEP
EOF
COPY assets/ /opt/data/
ENV UV_PYTHON=$UV_PROJECT_ENVIRONMENT/bin/python
##### For NVIDIANS only #####
FROM main as jet
ARG JET_API_VERSION
ENV PATH="$PATH:/opt/jet/bin"
RUN --mount=type=secret,id=JET_INDEX_URLS bash -ex <<"EOF"
JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS)
python -m venv /opt/jet
/opt/jet/bin/pip install --no-cache-dir $JET_INDEX_URLS \
jet-api==$JET_API_VERSION
EOF
RUN --mount=type=secret,id=JET_INDEX_URLS \
--mount=type=secret,id=LOGGER_INDEX_URL bash -ex <<"EOF"
JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS)
LOGGER_INDEX_URL=$(cat /run/secrets/LOGGER_INDEX_URL)
uv pip install --no-cache-dir --upgrade $LOGGER_INDEX_URL "one-logger"
uv pip install --no-cache-dir --upgrade "setuptools<80.0.0"
uv pip install --no-cache-dir --upgrade $JET_INDEX_URLS "jet-client~=2.0"
EOF
###
# syntax=docker/dockerfile:1.3-labs
ARG FROM_IMAGE_NAME
ARG WHEEL_DIR=/workspace/wheels
FROM $FROM_IMAGE_NAME as build_mamba
WORKDIR /opt
ARG WHEEL_DIR
RUN MAMBA_FORCE_BUILD=TRUE pip3 wheel -v git+https://github.com/state-spaces/mamba.git@v2.0.3 -w $WHEEL_DIR
ARG FROM_IMAGE_NAME
FROM $FROM_IMAGE_NAME as build_causalconv1d
WORKDIR /opt
ARG WHEEL_DIR
RUN CAUSAL_CONV1D_FORCE_BUILD=TRUE pip3 wheel -v git+https://github.com/Dao-AILab/causal-conv1d.git@v1.2.2.post1 -w $WHEEL_DIR
FROM $FROM_IMAGE_NAME as build_groupedgemm
WORKDIR /opt
ARG WHEEL_DIR
RUN pip3 wheel -v git+https://github.com/fanshiqing/grouped_gemm@v1.1.2 -w $WHEEL_DIR
ARG FROM_IMAGE_NAME
FROM $FROM_IMAGE_NAME as main
ENV DEBIAN_FRONTEND=noninteractive
RUN bash -ex <<"EOF"
apt-get update
apt-get install -y --no-install-recommends gettext python3-venv psmisc
apt-get clean
python -m venv /opt/jet
wget https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -O /usr/local/bin/yq
chmod a+x /usr/local/bin/yq
EOF
ARG UV_VERSION=0.7.2
ENV PATH="/root/.local/bin:$PATH"
RUN curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh
ENV UV_PROJECT_ENVIRONMENT=/opt/venv
ENV PATH="$UV_PROJECT_ENVIRONMENT/bin:$PATH"
ENV VIRTUAL_ENV=$UV_PROJECT_ENVIRONMENT
ENV UV_LINK_MODE=copy
ARG WHEEL_DIR
COPY README.md pyproject.toml uv.lock /workspace/
COPY megatron/core/__init__.py /workspace/megatron/core/
COPY megatron/core/package_info.py /workspace/megatron/core/
COPY docker/common/ /workspace/docker/common/
COPY --from=build_mamba $WHEEL_DIR/*.whl $WHEEL_DIR/
COPY --from=build_causalconv1d $WHEEL_DIR/*.whl $WHEEL_DIR/
COPY --from=build_groupedgemm $WHEEL_DIR/*.whl $WHEEL_DIR/
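# Create the venv, sync the lts extra (excluding packages provided by the base image),
# then install the prebuilt wheels via install_source_wheels.sh.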
RUN bash -ex <<"EOF"
uv venv ${UV_PROJECT_ENVIRONMENT} --system-site-packages
uv sync --extra lts --extra mlm --link-mode copy --locked \
--no-install-package torch \
--no-install-package torchvision \
--no-install-package triton \
--no-install-package nvidia-cublas-cu12 \
--no-install-package nvidia-cuda-cupti-cu12 \
--no-install-package nvidia-cuda-nvrtc-cu12 \
--no-install-package nvidia-cuda-runtime-cu12 \
--no-install-package nvidia-cudnn-cu12 \
--no-install-package nvidia-cufft-cu12 \
--no-install-package nvidia-cufile-cu12 \
--no-install-package nvidia-curand-cu12 \
--no-install-package nvidia-cusolver-cu12 \
--no-install-package nvidia-cusparse-cu12 \
--no-install-package nvidia-cusparselt-cu12 \
--no-install-package nvidia-nccl-cu12
bash docker/common/install_source_wheels.sh --input-wheel-dir $WHEEL_DIR/ --environment lts
EOF
ENV PYTHONPATH="/opt/megatron-lm:$PYTHONPATH"
COPY assets/ /opt/data/
ENV UV_PYTHON=$UV_PROJECT_ENVIRONMENT/bin/python
##### For NVIDIANS only #####
FROM main as jet
ARG JET_API_VERSION
ENV PATH="$PATH:/opt/jet/bin"
RUN --mount=type=secret,id=JET_INDEX_URLS bash -ex <<"EOF"
JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS)
python -m venv /opt/jet
/opt/jet/bin/pip install --no-cache-dir $JET_INDEX_URLS \
jet-api==$JET_API_VERSION
EOF
RUN --mount=type=secret,id=JET_INDEX_URLS \
--mount=type=secret,id=LOGGER_INDEX_URL bash -ex <<"EOF"
JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS)
LOGGER_INDEX_URL=$(cat /run/secrets/LOGGER_INDEX_URL)
uv pip install --no-cache-dir --upgrade $LOGGER_INDEX_URL "one-logger"
uv pip install --no-cache-dir --upgrade "setuptools<80.0.0"
uv pip install --no-cache-dir --upgrade $JET_INDEX_URLS "jet-client~=2.0"
EOF
###
# syntax=docker/dockerfile:1.3-labs
ARG FROM_IMAGE_NAME
FROM ${FROM_IMAGE_NAME} as main
RUN apt-get update && \
apt-get install -y --no-install-recommends gettext && \
apt-get clean && \
wget https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -O /usr/local/bin/yq && \
chmod a+x /usr/local/bin/yq
##### For NVIDIANS only #####
FROM main as jet
ARG JET_API_VERSION
RUN --mount=type=secret,id=JET_INDEX_URLS \
JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS) && \
pip install --no-cache-dir jet-api==$JET_API_VERSION "jet-client~=3.0" --upgrade $JET_INDEX_URLS
ENV PATH="$PATH:/opt/jet/bin"
###
# syntax=docker/dockerfile:experimental
ARG FROM_IMAGE_NAME
FROM $FROM_IMAGE_NAME as main
ENV DEBIAN_FRONTEND=noninteractive
ARG UV_VERSION=0.7.2
ARG YQ_VERSION=4.44.1
ENV PATH="/root/.local/bin:$PATH"
ENV UV_PROJECT_ENVIRONMENT=/opt/venv
ENV PATH="$UV_PROJECT_ENVIRONMENT/bin:$PATH"
ENV UV_LINK_MODE=copy
RUN curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh
WORKDIR /opt/megatron-lm
COPY pyproject.toml uv.lock /opt/megatron-lm/
COPY megatron/core/package_info.py megatron/core/__init__.py /opt/megatron-lm/megatron/core/
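# This image only needs the linting, test, and ci dependency groups.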
RUN uv sync --locked --only-group linting --only-group test --only-group ci
##### For NVIDIANS only #####
FROM main as jet
ARG JET_API_VERSION
RUN --mount=type=secret,id=JET_INDEX_URLS \
JET_INDEX_URLS=$(cat /run/secrets/JET_INDEX_URLS) && \
uv pip install --no-cache-dir "jet-client~=2.0" --upgrade $JET_INDEX_URLS
#!/bin/bash
set -xeuo pipefail # Trace commands; exit on errors, unset variables, and failed pipelines
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--base-image)
BASE_IMAGE="$2"
shift 2
;;
--python-version)
PYTHON_VERSION="$2"
shift 2
;;
--environment)
ENVIRONMENT="$2"
shift 2
;;
--use-uv)
USE_UV="true"
shift 1
;;
*)
echo "Unknown option: $1"
echo "Usage: $0 --base-image {pytorch|ubuntu} [--use-uv] [--python-version] [--environment]"
exit 1
;;
esac
done
if [[ -z "${PYTHON_VERSION:-}" ]]; then
PYTHON_VERSION="3.12"
fi
if [[ -z "${USE_UV:-}" ]]; then
USE_UV="false"
fi
# Validate base image argument
if [[ -z "${BASE_IMAGE:-}" || -z "${ENVIRONMENT:-}" ]]; then
echo "Error: --base-image argument is required"
echo "Usage: $0 --base-image {pytorch|ubuntu} --environment {dev|lts}"
exit 1
fi
if [[ "$BASE_IMAGE" != "pytorch" && "$BASE_IMAGE" != "ubuntu" ]]; then
echo "Error: --base-image must be either 'pytorch' or 'ubuntu'"
echo "Usage: $0 --base-image {pytorch|ubuntu}"
exit 1
fi
if [[ "$ENVIRONMENT" != "dev" && "$ENVIRONMENT" != "lts" ]]; then
echo "Error: --environment must be either 'dev' or 'lts'"
echo "Usage: $0 --environment {dev|lts}"
exit 1
fi
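# Typical invocation (values illustrative):
#   --base-image pytorch --environment dev --use-uv [--python-version 3.12]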
main() {
if [[ -n "${PAT:-}" ]]; then
echo -e "machine github.com\n login token\n password $PAT" >~/.netrc
chmod 600 ~/.netrc
fi
# Install dependencies
export DEBIAN_FRONTEND=noninteractive
# Install Python
apt-get update
apt-get install -y software-properties-common
add-apt-repository ppa:deadsnakes/ppa -y
apt-get install -y python$PYTHON_VERSION-dev python$PYTHON_VERSION-venv
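# Make the freshly installed interpreter the default python3.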
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python$PYTHON_VERSION 1
# Install tools
apt-get update
apt-get install -y wget curl git cmake
# Install CUDA
if [[ "$BASE_IMAGE" == "ubuntu" ]]; then
rm /etc/apt/sources.list.d/cuda*.list || true
rm /etc/apt/sources.list.d/nvidia-cuda.list || true
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64/cuda-keyring_1.1-1_all.deb
dpkg -i cuda-keyring_1.1-1_all.deb
rm cuda-keyring_1.1-1_all.deb
apt-get update
apt-get install -y cuda-toolkit-12-8 cudnn-cuda-12 libcudnn9-cuda-12 libcutlass-dev
fi
# Clean up
apt-get clean
unset PIP_CONSTRAINT
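# Two install paths: uv syncs the locked environment and installs the package without
# re-resolving its dependencies; the pip fallback installs straight from source.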
if [[ "$USE_UV" == "true" ]]; then
if [[ "$BASE_IMAGE" == "pytorch" ]]; then
UV_ARGS=(
"--no-install-package" "torch"
"--no-install-package" "torchvision"
"--no-install-package" "triton"
"--no-install-package" "nvidia-cublas-cu12"
"--no-install-package" "nvidia-cuda-cupti-cu12"
"--no-install-package" "nvidia-cuda-nvrtc-cu12"
"--no-install-package" "nvidia-cuda-runtime-cu12"
"--no-install-package" "nvidia-cudnn-cu12"
"--no-install-package" "nvidia-cufft-cu12"
"--no-install-package" "nvidia-cufile-cu12"
"--no-install-package" "nvidia-curand-cu12"
"--no-install-package" "nvidia-cusolver-cu12"
"--no-install-package" "nvidia-cusparse-cu12"
"--no-install-package" "nvidia-cusparselt-cu12"
"--no-install-package" "nvidia-nccl-cu12"
)
else
UV_ARGS=()
fi
# Install uv
UV_VERSION="0.7.2"
curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | sh
# Create virtual environment and install dependencies
uv venv ${UV_PROJECT_ENVIRONMENT} --system-site-packages
# Install dependencies
uv sync --locked --only-group build "${UV_ARGS[@]}"
uv sync \
--link-mode copy \
--locked \
--extra ${ENVIRONMENT} \
--all-groups "${UV_ARGS[@]}"
# Install the package
uv pip install --no-deps -e .
else
python3 -m venv $UV_PROJECT_ENVIRONMENT
. $UV_PROJECT_ENVIRONMENT/bin/activate
pip install --pre --no-cache-dir --upgrade pip
pip install --pre --no-cache-dir torch pybind11 wheel_stub ninja wheel packaging "setuptools>=77.0.0"
pip install --pre --no-cache-dir --no-build-isolation .
fi
}
# Call the main function
main "$@"
#!/bin/bash
set -xeuo pipefail # Trace commands; exit on errors, unset variables, and failed pipelines
INPUT_WHEEL_DIR=$(pwd)/wheels
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--input-wheel-dir)
INPUT_WHEEL_DIR="$2"
shift 2
;;
--environment)
ENVIRONMENT="$2"
shift 2
;;
*)
echo "Unknown option: $1"
echo "Usage: $0 --input-wheel-dir DIR"
exit 1
;;
esac
done
# Check if required arguments are provided
if [ -z "$INPUT_WHEEL_DIR" ] || [ -z "$ENVIRONMENT" ]; then
echo "Error: --input-wheel-dir and --environment are required"
echo "Usage: $0 --input-wheel-dir DIR --environment ENV"
exit 1
fi
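# Example invocation (mirroring the Dockerfile usage above):
#   bash docker/common/install_source_wheels.sh --input-wheel-dir /workspace/wheels --environment lts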
if [ "$ENVIRONMENT" = "dev" ]; then
TE_WHEEL=$(ls $INPUT_WHEEL_DIR/transformer_engine*.whl) || true
[ -z "$TE_WHEEL" ] && TE_WHEEL=$(bash docker/common/build_te.sh --output-wheel-dir $INPUT_WHEEL_DIR | tail -n 1)
fi
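# For each optional dependency, reuse a prebuilt wheel when one is present in the
# input directory; otherwise build it from source via the helper scripts.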
MAMBA_WHEEL=$(ls $INPUT_WHEEL_DIR/mamba*.whl) || true
[ -z "$MAMBA_WHEEL" ] && MAMBA_WHEEL=$(bash docker/common/build_mamba.sh --output-wheel-dir $INPUT_WHEEL_DIR | tail -n 1)
CAUSALCONV1D_WHEEL=$(ls $INPUT_WHEEL_DIR/causal_conv1d*.whl) || true
[ -z "$CAUSALCONV1D_WHEEL" ] && CAUSALCONV1D_WHEEL=$(bash docker/common/build_causalconv1d.sh --output-wheel-dir $INPUT_WHEEL_DIR | tail -n 1)
GROUPEDGEMM_WHEEL=$(ls $INPUT_WHEEL_DIR/grouped_gemm*.whl) || true
[ -z "$GROUPEDGEMM_WHEEL" ] && GROUPEDGEMM_WHEEL=$(bash docker/common/build_groupedgemm.sh --output-wheel-dir $INPUT_WHEEL_DIR | tail -n 1)
# Override deps that are already present in the base image
# only for dev
if [ "$ENVIRONMENT" = "dev" ]; then
uv pip install --no-cache-dir --no-deps $TE_WHEEL
fi
# Install heavy optional deps like mamba, causalconv1d, groupedgemm
uv pip install --no-cache-dir \
$MAMBA_WHEEL \
$CAUSALCONV1D_WHEEL \
$GROUPEDGEMM_WHEEL \
"setuptools<80.0.0"