2 changes: 2 additions & 0 deletions docs/source/en/_toctree.yml
@@ -648,6 +648,8 @@
title: OLMoE
- local: model_doc/open-llama
title: Open-Llama
- local: model_doc/openpangu_moe
title: OpenPanguMoE
- local: model_doc/opt
title: OPT
- local: model_doc/pegasus
71 changes: 71 additions & 0 deletions docs/source/en/model_doc/openpangu_moe.md
@@ -0,0 +1,71 @@
<!--Copyright 2025 the HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->


# OpenPanguMoE

## Overview

The OpenPanguMoE model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
<INSERT SHORT SUMMARY HERE>

The abstract from the paper is the following:

<INSERT PAPER ABSTRACT HERE>

Tips:

<INSERT TIPS ABOUT MODEL HERE>

This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).

## Usage examples

<INSERT SOME NICE EXAMPLES HERE>
Review comment from a Contributor on lines +25 to +41: Don't forget the docs :D
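Until the authors fill in the sections above, a minimal sketch of what a usage example could look like is given below. The checkpoint id is hypothetical, and because this PR does not register a tokenizer class, the snippet assumes the published repository ships its own tokenizer files for `AutoTokenizer` to pick up.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical checkpoint id; replace with the official repository once released.
checkpoint = "openpangu/openpangu-ultra-moe-718b"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    torch_dtype=torch.bfloat16,  # the 718B MoE is far too large for full precision on one device
    device_map="auto",
)

prompt = "Mixture-of-experts models scale efficiently because"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```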


## OpenPanguMoEConfig

[[autodoc]] OpenPanguMoEConfig

## OpenPanguMoEForCausalLM

[[autodoc]] OpenPanguMoEForCausalLM

## OpenPanguMoEModel

[[autodoc]] OpenPanguMoEModel
    - forward

## OpenPanguMoEPreTrainedModel

[[autodoc]] OpenPanguMoEPreTrainedModel

## OpenPanguMoEForSequenceClassification

[[autodoc]] OpenPanguMoEForSequenceClassification

## OpenPanguMoEForQuestionAnswering

[[autodoc]] OpenPanguMoEForQuestionAnswering

## OpenPanguMoEForTokenClassification

[[autodoc]] OpenPanguMoEForTokenClassification
1 change: 1 addition & 0 deletions src/transformers/models/__init__.py
@@ -256,6 +256,7 @@
from .omdet_turbo import *
from .oneformer import *
from .openai import *
from .openpangu_moe import *
from .opt import *
from .ovis2 import *
from .owlv2 import *
2 changes: 2 additions & 0 deletions src/transformers/models/auto/configuration_auto.py
@@ -301,6 +301,7 @@
("oneformer", "OneFormerConfig"),
("open-llama", "OpenLlamaConfig"),
("openai-gpt", "OpenAIGPTConfig"),
("openpangu_moe", "OpenPanguMoEConfig"),
("opt", "OPTConfig"),
("ovis2", "Ovis2Config"),
("owlv2", "Owlv2Config"),
@@ -763,6 +764,7 @@
("oneformer", "OneFormer"),
("open-llama", "OpenLlama"),
("openai-gpt", "OpenAI GPT"),
("openpangu_moe", "OpenPanguMoE"),
("opt", "OPT"),
("ovis2", "Ovis2"),
("owlv2", "OWLv2"),
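The two entries above register the `openpangu_moe` model type with `AutoConfig` and give it a display name. On a transformers build that includes this PR, the registration can be sanity-checked without loading any weights:

```python
from transformers import AutoConfig

# Resolves the model type through the mapping added above.
config = AutoConfig.for_model("openpangu_moe")
print(type(config).__name__)  # expected: OpenPanguMoEConfig
```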
2 changes: 2 additions & 0 deletions src/transformers/models/auto/modeling_auto.py
Review comment from a Contributor: No associated tokenizer? Would go to tokenization_auto

@@ -300,6 +300,7 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin):
("oneformer", "OneFormerModel"),
("open-llama", "OpenLlamaModel"),
("openai-gpt", "OpenAIGPTModel"),
("openpangu_moe", "OpenPanguMoEModel"),
("opt", "OPTModel"),
("ovis2", "Ovis2Model"),
("owlv2", "Owlv2Model"),
@@ -733,6 +734,7 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin):
("olmoe", "OlmoeForCausalLM"),
("open-llama", "OpenLlamaForCausalLM"),
("openai-gpt", "OpenAIGPTLMHeadModel"),
("openpangu_moe", "OpenPanguMoEForCausalLM"),
("opt", "OPTForCausalLM"),
("pegasus", "PegasusForCausalLM"),
("persimmon", "PersimmonForCausalLM"),
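These entries let the auto classes dispatch to the new backbone and causal-LM head. A hedged smoke test is sketched below; the keyword names mirror the `OpenPanguMoEConfig` signature added in this PR, and the deliberately tiny values exist only to avoid building the full 718B model, so the exact set of overrides may need adjusting once the modeling code is in.

```python
from transformers import AutoConfig, AutoModelForCausalLM

# Tiny, test-only hyperparameters (assumed to be accepted by the modeling code).
tiny_config = AutoConfig.for_model(
    "openpangu_moe",
    vocab_size=1024,
    hidden_size=64,
    intermediate_size=128,
    moe_intermediate_size=32,
    num_hidden_layers=2,
    num_dense_layers=1,
    num_attention_heads=4,
    num_key_value_heads=4,
    num_routed_experts=8,
    num_experts_per_tok=2,
    attention_kv_lora_dim=16,
    attention_q_lora_dim=32,
    attention_qk_rope_dim=8,
    attention_v_dim=16,
    attention_qk_dim=16,
)

# Dispatches to OpenPanguMoEForCausalLM via the mapping added above.
model = AutoModelForCausalLM.from_config(tiny_config)
print(model.__class__.__name__)
```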
29 changes: 29 additions & 0 deletions src/transformers/models/openpangu_moe/__init__.py
@@ -0,0 +1,29 @@
# coding=utf-8
# Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_openpangu_moe import *
    from .modeling_openpangu_moe import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
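The `_LazyModule` indirection keeps `import transformers` cheap: the configuration and modeling files are only imported when one of their public names is first accessed. A small illustration, assuming a build that includes this PR and that the submodules export their classes via `__all__` as usual:

```python
# Importing the package does not yet pull in the torch-heavy modeling code.
from transformers.models import openpangu_moe

# First attribute access triggers the real import resolved by define_import_structure.
config_cls = openpangu_moe.OpenPanguMoEConfig
print(config_cls.model_type)  # "pangu_ultra_moe"
```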
81 changes: 81 additions & 0 deletions src/transformers/models/openpangu_moe/configuration_openpangu_moe.py
@@ -0,0 +1,81 @@
# coding=utf-8
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
Review comment from a Contributor on lines +1 to +2: Please use the full licence, elsewhere too

Suggested change (replace the two lines above with the full Apache header):
# coding=utf-8
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""openPanguUltraMoE 718B model configuration"""

from ...configuration_utils import PreTrainedConfig

class OpenPanguMoEConfig(PreTrainedConfig):

Review comment from a Contributor: Docstrings! You can also use modular for this if you find models similar enough

    model_type = "pangu_ultra_moe"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=153600,
        hidden_size=7680,
        intermediate_size=18432,
        moe_intermediate_size=2048,
        num_hidden_layers=61,
        num_mtp_layers=1,
        num_attention_heads=128,
        num_key_value_heads=128,
        num_shared_experts=1,
        num_routed_experts=256,
        routed_scaling_factor=2.5,
        attention_kv_lora_dim=512,
        attention_q_lora_dim=1536,
        attention_qk_rope_dim=64,
        attention_v_dim=128,
        attention_qk_dim=128,
Review comment from a Contributor on lines +26 to +30: MLA from deepseek v3? Let's align names with things that exist elsewhere

        num_experts_per_tok=8,
        num_dense_layers=3,
Review comment from a Contributor: Looking into modular in a second but does this mean we have 3 dense first and then moe only afterwards? We might be able to use Ernie 4.5 Moe convention then with start/end idx.

        norm_topk_prob=True,
        hidden_act="silu",
        max_position_embeddings=131072,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        rope_theta=25600000,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta

        self.num_dense_layers = num_dense_layers
        self.intermediate_size = intermediate_size
        self.moe_intermediate_size = moe_intermediate_size
        self.num_shared_experts = num_shared_experts
        self.num_routed_experts = num_routed_experts
        self.routed_scaling_factor = routed_scaling_factor
        self.num_experts_per_tok = num_experts_per_tok
        self.norm_topk_prob = norm_topk_prob
        self.attention_kv_lora_dim = attention_kv_lora_dim
        self.attention_q_lora_dim = attention_q_lora_dim
        self.attention_qk_rope_dim = attention_qk_rope_dim
        self.attention_v_dim = attention_v_dim
        self.attention_qk_dim = attention_qk_dim
        self.attention_dropout = attention_dropout
        self.num_mtp_layers = num_mtp_layers

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
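Instantiating the configuration allocates no weights, so the defaults above can be inspected directly. If the modeling code follows the dense-first-then-MoE layout that the review comment about `num_dense_layers` alludes to, the split could be derived as sketched below; this interpretation is an assumption, since the modeling file is not part of this excerpt.

```python
from transformers import OpenPanguMoEConfig  # top-level export once this PR is merged

config = OpenPanguMoEConfig()  # default 718B-scale hyperparameters, no weights allocated
print(config.num_hidden_layers, config.num_dense_layers, config.num_routed_experts)  # 61 3 256

# Assumed layout (not confirmed here): the first `num_dense_layers` decoder layers
# use a dense MLP, and the remaining layers use MoE blocks.
layer_types = [
    "dense" if layer_idx < config.num_dense_layers else "moe"
    for layer_idx in range(config.num_hidden_layers)
]
print(layer_types[:5])  # ['dense', 'dense', 'dense', 'moe', 'moe']
```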